universal-mcp-agents 0.1.18__tar.gz → 0.1.19rc1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-agents might be problematic.
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/PKG-INFO +1 -1
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/pyproject.toml +2 -2
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/__init__.py +3 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/__main__.py +0 -6
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/llm_tool.py +1 -103
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/playbook_agent.py +1 -1
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/sandbox.py +90 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/README.md +45 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/__init__.py +3 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/__main__.py +28 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/agent.py +289 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/langgraph_agent.py +14 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/llm_tool.py +25 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/prompts.py +192 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/sandbox.py +101 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/state.py +42 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/tools.py +188 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/utils.py +388 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/applications/filesystem/__init__.py +0 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/applications/filesystem/app.py +160 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/applications/llm/__init__.py +3 -0
- universal_mcp_agents-0.1.19rc1/src/universal_mcp/applications/llm/app.py +300 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/uv.lock +69 -69
- universal_mcp_agents-0.1.18/src/universal_mcp/applications/llm/__init__.py +0 -3
- universal_mcp_agents-0.1.18/src/universal_mcp/applications/llm/app.py +0 -158
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/.github/workflows/evals.yml +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/.github/workflows/lint.yml +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/.github/workflows/release-please.yml +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/.github/workflows/tests.yml +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/.gitignore +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/.pre-commit-config.yaml +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/GEMINI.md +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/PROMPTS.md +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/README.md +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/bump_and_release.sh +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/__init__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/dataset.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/datasets/codeact.jsonl +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/datasets/exact.jsonl +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/datasets/tasks.jsonl +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/evaluators.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/prompts.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/run.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/evals/utils.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/tests/test_agents.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/base.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/__init__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/__main__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/agent.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/context.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/graph.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/prompts.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/state.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/bigtool/tools.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/builder/__main__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/builder/builder.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/builder/helper.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/builder/prompts.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/builder/state.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/cli.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/__init__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/__main__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/agent.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/models.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/prompts.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/sandbox.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/state.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact/utils.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/__init__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/agent.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/config.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/langgraph_agent.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/prompts.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/sandbox.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/state.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/tools.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/utils.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/hil.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/llm.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/react.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/shared/__main__.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/shared/prompts.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/shared/tool_node.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/simple.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/utils.py +0 -0
- {universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/applications/ui/app.py +0 -0
{universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.18
+Version: 0.1.19rc1
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
{universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "universal-mcp-agents"
-version = "0.1.18"
+version = "0.1.19-rc1"
 description = "Add your description here"
 readme = "README.md"
 authors = [
@@ -70,7 +70,7 @@ lint.ignore = [
 
 [tool.ruff.lint.pylint]
 max-args = 10
-max-statements =
+max-statements = 118
 max-returns = 10
 max-branches = 37
 
{universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/__init__.py
RENAMED
@@ -7,6 +7,7 @@ from universal_mcp.agents.codeact import CodeActAgent as CodeActScript
 from universal_mcp.agents.codeact0 import CodeActPlaybookAgent as CodeActRepl
 from universal_mcp.agents.react import ReactAgent
 from universal_mcp.agents.simple import SimpleAgent
+from universal_mcp.agents.unified import UnifiedAgent
 
 
 def get_agent(agent_name: Literal["react", "simple", "builder", "bigtool", "codeact-script", "codeact-repl"]):
@@ -22,6 +23,8 @@ def get_agent(agent_name: Literal["react", "simple", "builder", "bigtool", "code
         return CodeActScript
     elif agent_name == "codeact-repl":
         return CodeActRepl
+    elif agent_name == "unified":
+        return UnifiedAgent
     else:
         raise ValueError(
             f"Unknown agent: {agent_name}. Possible values: react, simple, builder, bigtool, codeact-script, codeact-repl"
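The factory change above makes the new agent selectable by name. A minimal usage sketch, mirroring the constructor arguments used in `unified/__main__.py` later in this diff (note that the `Literal` hint and the error message still list only the original agent names):

```python
# Sketch: obtaining the new unified agent through the existing factory.
from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents import get_agent

UnifiedAgent = get_agent("unified")
agent = UnifiedAgent(
    name="CodeAct Agent",
    instructions="Be very concise in your answers.",
    model="anthropic:claude-4-sonnet-20250514",
    registry=AgentrRegistry(),
)
```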
{universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/__main__.py
@@ -19,12 +19,6 @@ async def main():
         memory=memory,
     )
     print("Starting agent...")
-    # await agent.ainit()
-    # await agent.run_interactive()
-    # async for event in agent.stream(
-    #     user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
-    # ):
-    #     print(event.content, end="")
     result = await agent.invoke(
         user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
     )
{universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/llm_tool.py
@@ -27,7 +27,7 @@ def smart_print(data: Any) -> None:
     Args:
         data: Either a dictionary with string keys, or a list of such dictionaries
     """
-    print(light_copy(data)) # noqa
+    print(light_copy(data)) # noqa: T201
 
 
 def creative_writer(
@@ -275,105 +275,3 @@ def data_extractor(
         .invoke(prompt)
     )
     return cast(dict[str, Any], response)
-
-
-# news_articles_schema = {
-#     "type": "object",
-#     "properties": {
-#         "articles": {
-#             "type": "array",
-#             "title": "Articles",
-#             "description": "List of news articles",
-#             "items": {
-#                 "type": "object",
-#                 "properties": {
-#                     "headline": {
-#                         "type": "string",
-#                         "title": "Headline",
-#                         "description": "The headline of the news article"
-#                     },
-#                     "url": {
-#                         "type": "string",
-#                         "title": "URL",
-#                         "description": "The URL of the news article"
-#                     }
-#                 },
-#                 "required": ["headline", "url"],
-#             }
-#         }
-#     },
-#     "required": ["articles"],
-# }
-
-
-# news_articles_schema = {
-#     "title": "NewsArticleList",
-#     "description": "A list of news articles with headlines and URLs",
-#     "type": "object",
-#     "properties": {
-#         "articles": {
-#             "type": "array",
-#             "items": {
-#                 "type": "object",
-#                 "properties": {
-#                     "headline": {
-#                         "type": "string"
-#                     },
-#                     "url": {
-#                         "type": "string"
-#                     }
-#                 },
-#                 "required": ["headline", "url"]
-#             }
-#         }
-#     },
-#     "required": ["articles"]
-# }
-# model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
-# structured_model = model.with_structured_output(news_articles_schema)
-
-
-# class TwitterComment(BaseModel):
-#     skip: bool
-#     reason: str
-#     comment: str
-
-# twitter_comment_schema = {
-#     "title": "TwitterComment",
-#     "description": "A twitter comment to engage with followers",
-#     "type": "object",
-#     "properties": {
-#         "skip": {
-#             "type": "boolean"
-#         },
-#         "reason": {
-#             "type": "string"
-#         },
-#         "comment": {
-#             "type": "string"
-#         },
-#         "tagged_profiles": {
-#             "type": "array",
-#             "items": {
-#                 "type": "string"
-#             }
-#         }
-#     },
-#     "required": ["skip", "reason"]
-# }
-
-# comment = {
-#     "tweet_id": "08109402",
-#     "handle": "@iamnishant",
-#     "text": "Hey really loved this tweet! Well said 💯"
-# }
-
-# comment_instructions = (
-#     "Goal is to engage with my twitter followers who have commented on my tweets."
-#     "Please generate a single line, context-aware, conversational reply for the given comment."
-#     "- Use social media language (can use hinglish)."
-#     "- Skip the reply, if the comment is too generic."
-#     "- Also tag relevant people in the reply."
-# )
-
-# my_reply = call_llm(comment_instructions, comment, twitter_comment_schema)
{universal_mcp_agents-0.1.18 → universal_mcp_agents-0.1.19rc1}/src/universal_mcp/agents/codeact0/playbook_agent.py
@@ -81,7 +81,7 @@ class CodeActPlaybookAgent(BaseAgent):
             memory=memory,
             **kwargs,
         )
-        self.model_instance = load_chat_model(model
+        self.model_instance = load_chat_model(model)
         self.tools_config = tools or []
         self.registry = registry
         self.playbook_registry = playbook_registry
universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/sandbox.py
@@ -0,0 +1,90 @@
+import contextlib
+import inspect
+import io
+import queue
+import re
+import socket
+import threading
+import types
+from typing import Any
+
+from universal_mcp.agents.codeact0.utils import derive_context
+
+
+class Sandbox:
+    """
+    A class to execute code safely in a sandboxed environment with a timeout.
+    """
+
+    def __init__(self, timeout: int = 180):
+        """
+        Initializes the Sandbox.
+        Args:
+            timeout: The timeout for code execution in seconds.
+        """
+        self.timeout = timeout
+        self._locals: dict[str, Any] = {}
+        self.add_context: dict[str, Any] = {}
+
+    def run(self, code: str) -> tuple[str, dict[str, Any], dict[str, Any]]:
+        """
+        Execute code safely with a timeout.
+        - Returns (output_str, filtered_locals_dict, new_add_context)
+        - Errors or timeout are returned as output_str.
+        - Previous variables in _locals persist across calls.
+        """
+
+        EXCLUDE_TYPES = (
+            types.ModuleType,
+            type(re.match("", "")),
+            type(threading.Lock()),
+            type(threading.RLock()),
+            threading.Event,
+            threading.Condition,
+            threading.Semaphore,
+            queue.Queue,
+            socket.socket,
+            io.IOBase,
+        )
+
+        result_container = {"output": "<no output>"}
+
+        def target():
+            try:
+                with contextlib.redirect_stdout(io.StringIO()) as f:
+                    exec(code, self._locals, self._locals)
+                result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
+            except Exception as e:
+                result_container["output"] = "Error during execution: " + str(e)
+
+        thread = threading.Thread(target=target)
+        thread.start()
+        thread.join(self.timeout)
+
+        if thread.is_alive():
+            result_container["output"] = f"Code timeout: code execution exceeded {self.timeout} seconds."
+
+        # Filter locals for picklable/storable variables
+        all_vars = {}
+        for key, value in self._locals.items():
+            if key == "__builtins__":
+                continue
+            if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
+                continue
+            if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
+                continue
+            if isinstance(value, EXCLUDE_TYPES):
+                continue
+            if not callable(value) or not hasattr(value, "__name__"):
+                all_vars[key] = value
+
+        self._locals = all_vars
+
+        # Safely derive context
+        try:
+            self.add_context = derive_context(code, self.add_context)
+        except Exception:
+            # Keep the old context if derivation fails
+            pass
+
+        return result_container["output"], self._locals, self.add_context
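Based only on the interface shown above, here is a minimal sketch of driving the new `Sandbox` directly; the key point is that `_locals` persists between `run()` calls, so later snippets can reuse earlier variables:

```python
# Sketch: consecutive runs share the same locals dictionary.
from universal_mcp.agents.sandbox import Sandbox

sandbox = Sandbox(timeout=10)
output, local_vars, add_context = sandbox.run("x = 21\nprint(x * 2)")
print(output)      # "42"
output, local_vars, _ = sandbox.run("print(x + 1)")  # x persists from the first call
print(local_vars)  # {'x': 21}
```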
universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/README.md
@@ -0,0 +1,45 @@
+# Unified Agent
+
+The Unified Agent is a sophisticated AI assistant designed to understand and execute tasks by writing and running Python code. It operates within a secure sandbox environment and can leverage a variety of tools to interact with external systems and perform complex operations. A key feature of the Unified Agent is its ability to create reusable "playbooks" from user workflows, enabling automation of repeated tasks.
+
+## Architecture
+
+The agent's architecture is built upon the LangGraph library, creating a state machine that cycles between thinking (calling a language model) and acting (executing code or tools).
+
+### Core Components:
+
+* **`UnifiedAgent`**: The fundamental agent implementation. It processes user requests, writes Python code, and executes it in a sandbox to achieve the desired outcome. It also has a "playbook mode" to generate reusable Python functions from a user's workflow.
+* **State Graph (`CodeActState`)**: The agent's logic is defined as a state graph. The primary nodes are:
+    * `call_model`: Invokes the language model to generate Python code or select a tool based on the current state and user input.
+    * `sandbox`: Executes the generated Python code using a safe `eval` function with a timeout. The results and any errors are captured and fed back into the state.
+    * `execute_tools`: Handles the execution of meta-tools for searching, loading, and interacting with external functions.
+    * `playbook`: Manages the playbook creation process, including planning, user confirmation, and code generation.
+* **Sandbox (`sandbox.py`)**: A secure execution environment that runs Python code in a separate thread with a timeout. It ensures that the agent's code execution is isolated and cannot harm the host system.
+* **Tools**: The agent has access to a set of powerful tools:
+    * `execute_ipython_cell`: The primary tool for executing arbitrary Python code snippets.
+    * **AI Functions (`llm_tool.py`)**: A suite of functions (`generate_text`, `classify_data`, `extract_data`, `call_llm`) that allow the agent to delegate complex reasoning, classification, and data extraction tasks to a language model.
+    * **Meta Tools (`tools.py`)**: Functions like `search_functions` and `load_functions` that enable the agent to dynamically discover and load new tools from a `ToolRegistry`.
+
+## Playbook Mode
+
+A key feature of the Unified Agent is its ability to create reusable "playbooks". When a user performs a task that they might want to repeat in the future, they can trigger the playbook mode. The agent will then:
+
+1. **Plan:** Analyze the workflow and create a step-by-step plan for a reusable function, identifying user-specific variables that should become function parameters.
+2. **Confirm:** Ask the user for confirmation of the generated plan.
+3. **Generate:** Generate a Python function based on the confirmed plan. This function can be saved and executed later to automate the task.
+
+## Getting Started (`__main__.py`)
+
+The `__main__.py` file serves as a simple command-line interface for interacting with the agent. It demonstrates how to instantiate the `UnifiedAgent`, configure it with tools, and invoke it with a user request. This allows for easy testing and experimentation with the agent's capabilities.
+
+To run the agent, execute the following command from the root of the repository:
+```bash
+uv run python -m src.universal_mcp.agents.unified.__main__
+```
+
+Major TODO:
+- [] Improve LLM Tools
+- [] Use smaller dedicated models for universal_write, clasify etc
+- Improve Sandbox
+- [] Support saving loading context
+- [] Direct async tool support
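To make the playbook idea concrete, here is a hypothetical (not shipped) example of the kind of parameterized function the generating step aims to produce, where user-specific values from the original workflow become arguments:

```python
# Hypothetical generated playbook: the lookback window becomes a parameter,
# and the body stands in for the email tools the agent would have loaded.
def fetch_unsubscribe_links(days: int = 7) -> list[str]:
    """Collect unsubscribe links from promotional emails received in the last `days` days."""
    emails = [  # placeholder for a loaded email-search tool
        {"subject": "Big sale", "body": "Shop now! https://example.com/unsubscribe?id=1"},
    ]
    links = []
    for email in emails:
        links += [word for word in email["body"].split() if "unsubscribe" in word]
    return links


print(fetch_unsubscribe_links(days=7))
```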
universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/__main__.py
@@ -0,0 +1,28 @@
+import asyncio
+
+from langgraph.checkpoint.memory import MemorySaver
+from rich import print
+from universal_mcp.agentr.registry import AgentrRegistry
+
+from universal_mcp.agents.unified import UnifiedAgent
+from universal_mcp.agents.utils import messages_to_list
+
+
+async def main():
+    memory = MemorySaver()
+    default_tools = {"llm": ["generate_text", "classify_data", "extract_data", "call_llm"]}
+    agent = UnifiedAgent(
+        name="CodeAct Agent",
+        instructions="Be very concise in your answers.",
+        model="anthropic:claude-4-sonnet-20250514",
+        tools=default_tools,
+        registry=AgentrRegistry(),
+        memory=memory,
+    )
+    print("Starting agent...")
+    result = await agent.invoke(user_input="find the 80th fibonnaci number")
+    print(messages_to_list(result["messages"]))
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/agent.py
@@ -0,0 +1,289 @@
+import json
+import re
+from typing import Literal, cast
+
+from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.tools import StructuredTool
+from langchain_core.tools import tool as create_tool
+from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.graph import START, StateGraph
+from langgraph.types import Command, RetryPolicy
+from loguru import logger
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolConfig, ToolFormat
+
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
+
+from .llm_tool import smart_print
+from .prompts import (
+    PLAYBOOK_CONFIRMING_PROMPT,
+    PLAYBOOK_GENERATING_PROMPT,
+    PLAYBOOK_PLANNING_PROMPT,
+    create_default_prompt,
+)
+from .sandbox import eval_unsafe
+from .state import CodeActState
+from .tools import create_meta_tools, enter_playbook_mode, get_valid_tools
+from .utils import inject_context, smart_truncate
+
+
+class UnifiedAgent(BaseAgent):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        memory: BaseCheckpointSaver | None = None,
+        tools: ToolConfig | None = None,
+        registry: ToolRegistry | None = None,
+        playbook_registry: object | None = None,
+        sandbox_timeout: int = 20,
+        **kwargs,
+    ):
+        super().__init__(
+            name=name,
+            instructions=instructions,
+            model=model,
+            memory=memory,
+            **kwargs,
+        )
+        self.model_instance = load_chat_model(model)
+        self.tools_config = tools or {}
+        self.registry = registry
+        self.playbook_registry = playbook_registry
+        self.sandbox_timeout = sandbox_timeout
+        self.eval_fn = eval_unsafe
+        if self.tools_config and not self.registry:
+            raise ValueError("Registry must be provided with tools")
+
+    async def _build_graph(self): # noqa: PLR0915
+        meta_tools = create_meta_tools(self.registry)
+        additional_tools = [smart_print, meta_tools["web_search"]]
+        self.additional_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in additional_tools]
+        self.default_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
+
+        async def call_model(state: CodeActState) -> Command[Literal["sandbox", "execute_tools"]]:
+            self.exported_tools = []
+
+            selected_tool_ids = state.get("selected_tool_ids", [])
+            self.exported_tools = await self.registry.export_tools(selected_tool_ids, ToolFormat.LANGCHAIN)
+            all_tools = self.exported_tools + self.additional_tools
+            self.final_instructions, self.tools_context = create_default_prompt(all_tools, self.instructions)
+            messages = [{"role": "user", "content": self.final_instructions}] + state["messages"]
+
+            if state.get("output"):
+                messages.append(
+                    {
+                        "role": "system",
+                        "content": f"The last code execution resulted in this output:\n{state['output']}",
+                    }
+                )
+
+            # Run the model and potentially loop for reflection
+            model_with_tools = self.model_instance.bind_tools(
+                tools=[
+                    enter_playbook_mode,
+                    meta_tools["search_functions"],
+                    meta_tools["load_functions"],
+                ],
+                tool_choice="auto",
+            )
+            response = cast(AIMessage, model_with_tools.invoke(messages))
+            response_text = get_message_text(response)
+            code_match = re.search(r"```python\n(.*?)\n```", response_text, re.DOTALL)
+
+            if code_match:
+                code = code_match.group(1).strip()
+                return Command(goto="sandbox", update={"messages": [response], "code": code, "output": ""})
+            elif response.tool_calls:
+                return Command(goto="execute_tools", update={"messages": [response]})
+            else:
+                return Command(update={"messages": [response]})
+
+        async def execute_tools(state: CodeActState) -> Command[Literal["call_model", "playbook", "sandbox"]]:
+            """Execute tool calls"""
+            last_message = state["messages"][-1]
+            tool_calls = last_message.tool_calls if isinstance(last_message, AIMessage) else []
+
+            tool_messages = []
+            new_tool_ids = []
+            ask_user = False
+            ai_msg = ""
+            tool_result = ""
+
+            for tool_call in tool_calls:
+                try:
+                    if tool_call["name"] == "enter_playbook_mode":
+                        tool_message = ToolMessage(
+                            content=json.dumps("Entered Playbook Mode."),
+                            name=tool_call["name"],
+                            tool_call_id=tool_call["id"],
+                        )
+                        return Command(
+                            goto="playbook",
+                            update={"playbook_mode": "planning", "messages": [tool_message]}, # Entered Playbook mode
+                        )
+                    elif tool_call["name"] == "load_functions": # Handle load_functions separately
+                        valid_tools, unconnected_links = await get_valid_tools(
+                            tool_ids=tool_call["args"]["tool_ids"], registry=self.registry
+                        )
+                        new_tool_ids.extend(valid_tools)
+                        # Create tool message response
+                        tool_result = f"Successfully loaded {len(valid_tools)} tools: {valid_tools}"
+                        links = "\n".join(unconnected_links)
+                        if links:
+                            ask_user = True
+                            ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {links} "
+                    elif tool_call["name"] == "search_functions":
+                        tool_result = await meta_tools["search_functions"].ainvoke(tool_call["args"])
+                except Exception as e:
+                    tool_result = f"Error during {tool_call}: {e}"
+
+                tool_message = ToolMessage(
+                    content=json.dumps(tool_result),
+                    name=tool_call["name"],
+                    tool_call_id=tool_call["id"],
+                )
+                tool_messages.append(tool_message)
+
+            if ask_user:
+                tool_messages.append(AIMessage(content=ai_msg))
+                return Command(update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})
+
+            return Command(goto="call_model", update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})
+
+        def sandbox(state: CodeActState) -> Command[Literal["call_model"]]:
+            code = state.get("code")
+
+            if not code:
+                logger.error("Sandbox called without code")
+                return Command(
+                    goto="call_model",
+                    update={"output": "Sandbox was called without any code to execute."},
+                )
+
+            previous_add_context = state.get("add_context", {})
+            add_context = inject_context(previous_add_context, self.tools_context)
+            existing_context = state.get("context", {})
+            context = {**existing_context, **add_context}
+            # Execute the script in the sandbox
+
+            output, new_context, new_add_context = self.eval_fn(
+                code, context, previous_add_context, 180
+            ) # default timeout 3 min
+            output = smart_truncate(output)
+
+            return Command(
+                goto="call_model",
+                update={
+                    "output": output,
+                    "code": "",
+                    "context": new_context,
+                    "add_context": new_add_context,
+                },
+            )
+
+        def playbook(state: CodeActState) -> Command[Literal["call_model"]]:
+            playbook_mode = state.get("playbook_mode")
+            if playbook_mode == "planning":
+                planning_instructions = self.instructions + PLAYBOOK_PLANNING_PROMPT
+                messages = [{"role": "system", "content": planning_instructions}] + state["messages"]
+
+                response = self.model_instance.invoke(messages)
+                response = cast(AIMessage, response)
+                response_text = get_message_text(response)
+                # Extract plan from response text between triple backticks
+                plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
+                if plan_match:
+                    plan = plan_match.group(1).strip()
+                else:
+                    plan = response_text.strip()
+                return Command(update={"messages": [response], "playbook_mode": "confirming", "plan": plan})
+
+            elif playbook_mode == "confirming":
+                confirmation_instructions = self.instructions + PLAYBOOK_CONFIRMING_PROMPT
+                messages = [{"role": "system", "content": confirmation_instructions}] + state["messages"]
+                response = self.model_instance.invoke(messages, stream=False)
+                response = get_message_text(response)
+                if "true" in response.lower():
+                    return Command(goto="playbook", update={"playbook_mode": "generating"})
+                else:
+                    return Command(goto="playbook", update={"playbook_mode": "planning"})
+
+            elif playbook_mode == "generating":
+                generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
+                messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
+                response = cast(AIMessage, self.model_instance.invoke(messages))
+                raw_content = get_message_text(response)
+                func_code = raw_content.strip()
+                func_code = func_code.replace("```python", "").replace("```", "")
+                func_code = func_code.strip()
+
+                # Extract function name (handle both regular and async functions)
+                match = re.search(r"^\s*(?:async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", func_code, re.MULTILINE)
+                if match:
+                    function_name = match.group(1)
+                else:
+                    function_name = "generated_playbook"
+
+                # Save or update an Agent using the helper registry
+                saved_note = ""
+                try:
+                    if not self.playbook_registry:
+                        raise ValueError("Playbook registry is not configured")
+
+                    # Build instructions payload embedding the plan and function code
+                    instructions_payload = {
+                        "playbookPlan": state["plan"],
+                        "playbookScript": {
+                            "name": function_name,
+                            "code": func_code,
+                        },
+                    }
+
+                    # Convert tool ids list to dict
+                    tool_dict = convert_tool_ids_to_dict(state["selected_tool_ids"])
+
+                    res = self.playbook_registry.create_agent(
+                        name=function_name,
+                        description=f"Generated playbook: {function_name}",
+                        instructions=instructions_payload,
+                        tools=tool_dict,
+                        visibility="private",
+                    )
+                    saved_note = f"Successfully created your playbook! Check it out here: [View Playbook](https://wingmen.info/agents/{res.id})"
+                except Exception as e:
+                    saved_note = f"Failed to save generated playbook as Agent '{function_name}': {e}"
+
+                # Mock tool call for exit_playbook_mode (for testing/demonstration)
+                mock_exit_tool_call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
+                mock_assistant_message = AIMessage(content=saved_note, tool_calls=[mock_exit_tool_call])
+
+                # Mock tool response for exit_playbook_mode
+                mock_exit_tool_response = ToolMessage(
+                    content=json.dumps(f"Exited Playbook Mode.{saved_note}"),
+                    name="exit_playbook_mode",
+                    tool_call_id="mock_exit_playbook_123",
+                )
+
+                return Command(
+                    update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"}
+                )
+
+        def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
+            """Route to either normal mode or playbook creation"""
+            if state.get("playbook_mode") in ["planning", "confirming", "generating"]:
+                return "playbook"
+
+            return "call_model"
+
+        agent = StateGraph(state_schema=CodeActState)
+        agent.add_node(call_model, retry_policy=RetryPolicy(max_attempts=3, retry_on=filter_retry_on))
+        agent.add_node(sandbox)
+        agent.add_node(playbook)
+        agent.add_node(execute_tools)
+        agent.add_conditional_edges(START, route_entry)
+        # agent.add_edge(START, "call_model")
+        return agent.compile(checkpointer=self.memory)
universal_mcp_agents-0.1.19rc1/src/universal_mcp/agents/unified/langgraph_agent.py
@@ -0,0 +1,14 @@
+from universal_mcp.agentr.registry import AgentrRegistry
+
+from universal_mcp.agents.unified import UnifiedAgent
+
+
+async def agent():
+    agent_obj = UnifiedAgent(
+        name="CodeAct Agent",
+        instructions="Be very concise in your answers.",
+        model="anthropic:claude-4-sonnet-20250514",
+        tools=[],
+        registry=AgentrRegistry(),
+    )
+    return await agent_obj._build_graph()