universal-mcp-agents 0.1.17__py3-none-any.whl → 0.1.19rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of universal-mcp-agents might be problematic. Click here for more details.

@@ -7,6 +7,7 @@ from universal_mcp.agents.codeact import CodeActAgent as CodeActScript
7
7
  from universal_mcp.agents.codeact0 import CodeActPlaybookAgent as CodeActRepl
8
8
  from universal_mcp.agents.react import ReactAgent
9
9
  from universal_mcp.agents.simple import SimpleAgent
10
+ from universal_mcp.agents.unified import UnifiedAgent
10
11
 
11
12
 
12
13
  def get_agent(agent_name: Literal["react", "simple", "builder", "bigtool", "codeact-script", "codeact-repl"]):
@@ -22,6 +23,8 @@ def get_agent(agent_name: Literal["react", "simple", "builder", "bigtool", "code
22
23
  return CodeActScript
23
24
  elif agent_name == "codeact-repl":
24
25
  return CodeActRepl
26
+ elif agent_name == "unified":
27
+ return UnifiedAgent
25
28
  else:
26
29
  raise ValueError(
27
30
  f"Unknown agent: {agent_name}. Possible values: react, simple, builder, bigtool, codeact-script, codeact-repl"
@@ -19,12 +19,6 @@ async def main():
19
19
  memory=memory,
20
20
  )
21
21
  print("Starting agent...")
22
- # await agent.ainit()
23
- # await agent.run_interactive()
24
- # async for event in agent.stream(
25
- # user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
26
- # ):
27
- # print(event.content, end="")
28
22
  result = await agent.invoke(
29
23
  user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
30
24
  )
@@ -27,7 +27,7 @@ def smart_print(data: Any) -> None:
27
27
  Args:
28
28
  data: Either a dictionary with string keys, or a list of such dictionaries
29
29
  """
30
- print(light_copy(data)) # noqa
30
+ print(light_copy(data)) # noqa: T201
31
31
 
32
32
 
33
33
  def creative_writer(
@@ -275,105 +275,3 @@ def data_extractor(
275
275
  .invoke(prompt)
276
276
  )
277
277
  return cast(dict[str, Any], response)
278
-
279
-
280
- # news_articles_schema = {
281
- # "type": "object",
282
- # "properties": {
283
- # "articles": {
284
- # "type": "array",
285
- # "title": "Articles",
286
- # "description": "List of news articles",
287
- # "items": {
288
- # "type": "object",
289
- # "properties": {
290
- # "headline": {
291
- # "type": "string",
292
- # "title": "Headline",
293
- # "description": "The headline of the news article"
294
- # },
295
- # "url": {
296
- # "type": "string",
297
- # "title": "URL",
298
- # "description": "The URL of the news article"
299
- # }
300
- # },
301
- # "required": ["headline", "url"],
302
- # }
303
- # }
304
- # },
305
- # "required": ["articles"],
306
- # }
307
-
308
-
309
- # news_articles_schema = {
310
- # "title": "NewsArticleList",
311
- # "description": "A list of news articles with headlines and URLs",
312
- # "type": "object",
313
- # "properties": {
314
- # "articles": {
315
- # "type": "array",
316
- # "items": {
317
- # "type": "object",
318
- # "properties": {
319
- # "headline": {
320
- # "type": "string"
321
- # },
322
- # "url": {
323
- # "type": "string"
324
- # }
325
- # },
326
- # "required": ["headline", "url"]
327
- # }
328
- # }
329
- # },
330
- # "required": ["articles"]
331
- # }
332
- # model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
333
- # structured_model = model.with_structured_output(news_articles_schema)
334
-
335
-
336
- # class TwitterComment(BaseModel):
337
- # skip: bool
338
- # reason: str
339
- # comment: str
340
-
341
- # twitter_comment_schema = {
342
- # "title": "TwitterComment",
343
- # "description": "A twitter comment to engage with followers",
344
- # "type": "object",
345
- # "properties": {
346
- # "skip": {
347
- # "type": "boolean"
348
- # },
349
- # "reason": {
350
- # "type": "string"
351
- # },
352
- # "comment": {
353
- # "type": "string"
354
- # },
355
- # "tagged_profiles": {
356
- # "type": "array",
357
- # "items": {
358
- # "type": "string"
359
- # }
360
- # }
361
- # },
362
- # "required": ["skip", "reason"]
363
- # }
364
-
365
- # comment = {
366
- # "tweet_id": "08109402",
367
- # "handle": "@iamnishant",
368
- # "text": "Hey really loved this tweet! Well said 💯"
369
- # }
370
-
371
- # comment_instructions = (
372
- # "Goal is to engage with my twitter followers who have commented on my tweets."
373
- # "Please generate a single line, context-aware, conversational reply for the given comment."
374
- # "- Use social media language (can use hinglish)."
375
- # "- Skip the reply, if the comment is too generic."
376
- # "- Also tag relevant people in the reply."
377
- # )
378
-
379
- # my_reply = call_llm(comment_instructions, comment, twitter_comment_schema)
@@ -2,18 +2,16 @@ import inspect
2
2
  import json
3
3
  import re
4
4
  from collections.abc import Callable
5
- from dataclasses import dataclass
6
- from pathlib import Path
7
5
  from typing import Literal, cast
8
6
 
9
- from langchain_core.messages import AIMessage, RemoveMessage, ToolMessage
7
+ from langchain_core.messages import AIMessage, ToolMessage
10
8
  from langchain_core.tools import StructuredTool
11
9
  from langchain_core.tools import tool as create_tool
12
10
  from langgraph.checkpoint.base import BaseCheckpointSaver
13
11
  from langgraph.graph import START, StateGraph
14
12
  from langgraph.types import Command, RetryPolicy
15
13
  from universal_mcp.tools.registry import ToolRegistry
16
- from universal_mcp.types import ToolFormat, ToolConfig
14
+ from universal_mcp.types import ToolConfig, ToolFormat
17
15
 
18
16
  from universal_mcp.agents.base import BaseAgent
19
17
  from universal_mcp.agents.codeact0.llm_tool import ai_classify, call_llm, data_extractor, smart_print
@@ -22,10 +20,14 @@ from universal_mcp.agents.codeact0.prompts import (
22
20
  )
23
21
  from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell
24
22
  from universal_mcp.agents.codeact0.state import CodeActState
25
- from universal_mcp.agents.codeact0.tools import create_meta_tools, enter_playbook_mode, exit_playbook_mode, get_valid_tools
23
+ from universal_mcp.agents.codeact0.tools import (
24
+ create_meta_tools,
25
+ enter_playbook_mode,
26
+ get_valid_tools,
27
+ )
26
28
  from universal_mcp.agents.codeact0.utils import inject_context, smart_truncate
27
29
  from universal_mcp.agents.llm import load_chat_model
28
- from universal_mcp.agents.utils import filter_retry_on, get_message_text, convert_tool_ids_to_dict
30
+ from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
29
31
 
30
32
  PLAYBOOK_PLANNING_PROMPT = """Now, you are tasked with creating a reusable playbook from the user's previous workflow.
31
33
 
@@ -48,7 +50,6 @@ Example:
48
50
  Now create a plan based on the conversation history. Enclose it between ``` and ```. Ask the user if the plan is okay."""
49
51
 
50
52
 
51
-
52
53
  PLAYBOOK_CONFIRMING_PROMPT = """Now, you are tasked with confirming the playbook plan. Return True if the user is happy with the plan, False otherwise. Do not say anything else in your response. The user response will be the last message in the chain.
53
54
  """
54
55
 
@@ -80,7 +81,7 @@ class CodeActPlaybookAgent(BaseAgent):
80
81
  memory=memory,
81
82
  **kwargs,
82
83
  )
83
- self.model_instance = load_chat_model(model, thinking=True)
84
+ self.model_instance = load_chat_model(model)
84
85
  self.tools_config = tools or []
85
86
  self.registry = registry
86
87
  self.playbook_registry = playbook_registry
@@ -92,20 +93,19 @@ class CodeActPlaybookAgent(BaseAgent):
92
93
  meta_tools = create_meta_tools(self.registry)
93
94
  additional_tools = [smart_print, data_extractor, ai_classify, call_llm, meta_tools["web_search"]]
94
95
  self.additional_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in additional_tools]
96
+
95
97
  async def call_model(state: CodeActState) -> Command[Literal["sandbox", "execute_tools"]]:
96
98
  self.exported_tools = []
97
99
  if self.tools_config:
98
100
  # Convert dict format to list format if needed
99
101
  if isinstance(self.tools_config, dict):
100
102
  self.tools_config = [
101
- f"{provider}__{tool}"
102
- for provider, tools in self.tools_config.items()
103
- for tool in tools
103
+ f"{provider}__{tool}" for provider, tools in self.tools_config.items() for tool in tools
104
104
  ]
105
105
  if not self.registry:
106
106
  raise ValueError("Tools are configured but no registry is provided")
107
107
  # Langchain tools are fine
108
- self.tools_config.extend(state.get('selected_tool_ids',[]))
108
+ self.tools_config.extend(state.get("selected_tool_ids", []))
109
109
  self.exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
110
110
  self.final_instructions, self.tools_context = create_default_prompt(
111
111
  self.exported_tools, self.additional_tools, self.instructions
@@ -167,7 +167,7 @@ class CodeActPlaybookAgent(BaseAgent):
167
167
  )
168
168
  return Command(
169
169
  goto="playbook",
170
- update={"playbook_mode": "planning", "messages": [tool_message]}, #Entered Playbook mode
170
+ update={"playbook_mode": "planning", "messages": [tool_message]}, # Entered Playbook mode
171
171
  )
172
172
  elif tool_call["name"] == "execute_ipython_cell":
173
173
  return Command(goto="sandbox")
@@ -261,13 +261,12 @@ class CodeActPlaybookAgent(BaseAgent):
261
261
  response = cast(AIMessage, response)
262
262
  response_text = get_message_text(response)
263
263
  # Extract plan from response text between triple backticks
264
- plan_match = re.search(r'```(.*?)```', response_text, re.DOTALL)
264
+ plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
265
265
  if plan_match:
266
266
  plan = plan_match.group(1).strip()
267
267
  else:
268
268
  plan = response_text.strip()
269
269
  return Command(update={"messages": [response], "playbook_mode": "confirming", "plan": plan})
270
-
271
270
 
272
271
  elif playbook_mode == "confirming":
273
272
  confirmation_instructions = self.instructions + PLAYBOOK_CONFIRMING_PROMPT
@@ -279,8 +278,6 @@ class CodeActPlaybookAgent(BaseAgent):
279
278
  else:
280
279
  return Command(goto="playbook", update={"playbook_mode": "planning"})
281
280
 
282
-
283
-
284
281
  elif playbook_mode == "generating":
285
282
  generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
286
283
  messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
@@ -327,25 +324,19 @@ class CodeActPlaybookAgent(BaseAgent):
327
324
  saved_note = f"Failed to save generated playbook as Agent '{function_name}': {e}"
328
325
 
329
326
  # Mock tool call for exit_playbook_mode (for testing/demonstration)
330
- mock_exit_tool_call = {
331
- "name": "exit_playbook_mode",
332
- "args": {},
333
- "id": "mock_exit_playbook_123"
334
- }
335
- mock_assistant_message = AIMessage(
336
- content=saved_note,
337
- tool_calls=[mock_exit_tool_call]
338
- )
327
+ mock_exit_tool_call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
328
+ mock_assistant_message = AIMessage(content=saved_note, tool_calls=[mock_exit_tool_call])
339
329
 
340
-
341
330
  # Mock tool response for exit_playbook_mode
342
331
  mock_exit_tool_response = ToolMessage(
343
332
  content=json.dumps(f"Exited Playbook Mode.{saved_note}"),
344
333
  name="exit_playbook_mode",
345
- tool_call_id="mock_exit_playbook_123"
334
+ tool_call_id="mock_exit_playbook_123",
346
335
  )
347
336
 
348
- return Command(update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"})
337
+ return Command(
338
+ update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"}
339
+ )
349
340
 
350
341
  def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
351
342
  """Route to either normal mode or playbook creation"""
@@ -9,102 +9,38 @@ from universal_mcp.agents.codeact0.utils import schema_to_signature
9
9
  uneditable_prompt = """
10
10
  You are **Wingmen**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.
11
11
 
12
- ## Responsibilities
13
-
14
- - **Answer directly** if the task is simple (e.g. print, math, general knowledge).
15
- - For any task requiring logic, execution, or data handling, use `execute_ipython_cell`.
16
- - For writing or NLP tasks (summarizing, generating, extracting), always use AI functions via code never respond directly.
17
-
18
- ## Tool vs. Function: Required Separation
19
-
20
- You must clearly distinguish between tools (called via the tool calling API) and internal functions (used inside code blocks).
21
-
22
- ### Tools Must Be Called via Tool Calling API
23
-
24
- These must be called using **tool calling**, not from inside code blocks:
25
-
26
- - `execute_ipython_cell` For running any Python code or logic.
27
- - `search_functions` To discover available functions for a task.
28
- - `load_functions` To load a specific function by full ID.
29
-
30
- **Do not attempt to call these inside `python` code.**
31
- Use tool calling syntax for these operations.
32
-
33
- ### Functions — Must Be Used Inside Code Blocks
34
-
35
- All other functions, including LLM functions, must always be used within code executed by `execute_ipython_cell`. These include:
36
-
37
- - `smart_print()` — For inspecting unknown data structures before looping.
38
- - `asyncio.run()` For wrapping and executing asynchronous logic. You must not use await outside an async function. And the async function must be called by `asyncio.run()`.
39
- - Any functions for applications loaded via `load_functions`.
40
- - Any logic, data handling, writing, NLP, generation, summarization, or extraction functionality of LLMs.
41
-
42
- These must be called **inside a Python code block**, and that block must be executed using `execute_ipython_cell`.
43
-
44
- ## Tool/Function Usage Policy
45
-
46
- 1. **Always Use Tools/Functions for Required Tasks**
47
- Any searching, loading, or executing must be done using a tool/function call. Never answer manually if a tool/function is appropriate.
48
-
49
- 2. **Use Existing Functions First**
50
- Use existing functions if available. Otherwise, use `search_functions` with a concise query describing the task.
51
-
52
- 3. **Load Only Relevant Tools**
53
- When calling `load_functions`, include only relevant function IDs.
54
- - Prefer connected applications over unconnected ones.
55
- - If multiple functions match (i.e. if none are connected, or multiple are connected), ask the user to choose.
56
- - After loading a tool, you do not need to import/declare it again. It can be called directly in further cells.
57
-
58
- 4. **Follow First Turn Process Strictly**
59
- On the **first turn**, do only **one** of the following:
60
- - Handle directly (if trivial)
61
- - Use a tool/function (`execute_ipython_cell`, `search_functions`, etc.)
62
-
63
- **Do not extend the conversation on the first message.**
64
-
65
- ## Coding Rules
66
-
67
- - Use `smart_print()` to inspect unknown structures, especially those received from function outputs, before looping or branching.
68
- - Validate logic with a single item before processing lists or large inputs.
69
- - Try to achieve as much as possible in a single code block.
70
- - Use only pre-installed Python libraries. Do import them once before using.
71
- - Outer level functions, variables, classes, and imports declared previously can be used in later cells.
72
- - For all functions, call using keyword arguments only. DO NOT use any positional arguments.
73
-
74
- ### **Async Function Usage — Critical**
75
-
76
- When calling asynchronous functions:
77
- - You must define or use an **inner async function**.
78
- - Use `await` only **inside** that async function.
79
- - Run it using `asyncio.run(<function_name>())` **without** `await` at the outer level.
80
-
81
- **Wrong - Using `await` outside an async function**
82
- ```
83
- result = await some_async_function()
84
- ```
85
- **Wrong - Attaching await before asyncio.run**.
86
- `await asyncio.run(main())`
87
- These will raise SyntaxError: 'await' outside async function
88
- The correct method is the following-
89
- ```
90
- import asyncio
91
- async def some_async_function():
92
- ...
93
-
94
- async def main():
95
- result = await some_async_function()
96
- print(result)
97
-
98
- asyncio.run(main())
99
- #or
100
- result = asyncio.run(some_async_function(arg1 = <arg1>))
101
- ```
102
- ## Output Formatting
103
- - All code results must be returned in **Markdown**.
104
- - The user cannot see raw output, so format results clearly:
105
- - Use tables for structured data.
106
- - Provide links for files or images.
107
- - Be explicit in formatting to ensure readability.
12
+ Your job is to answer the user's question or perform the task they ask for.
13
+ - Answer simple questions (which do not require you to write any code or access any external resources) directly. Note that any operation that involves using ONLY print functions should be answered directly.
14
+ - For task requiring operations or access to external resources, you should achieve the task by executing Python code snippets.
15
+ - You have access to `execute_ipython_cell` tool that allows you to execute Python code in an IPython notebook cell.
16
+ - You also have access to two tools for finding and loading more python functions- `search_functions` and `load_functions`, which you must use for finding functions for using different external applications. Prefer pre-loaded or functions already available when possible, and prioritize connected applications over unconnected ones. When this is not enough to break a tie between similar applications, ask the user.
17
+ - In writing or natural language processing tasks DO NOT answer directly. Instead use `execute_ipython_cell` tool with the AI functions provided to you for tasks like summarizing, text generation, classification, data extraction from text or unstructured data, etc. Avoid hardcoded approaches to classification, data extraction.
18
+ - The code you write will be executed in a sandbox environment, and you can use the output of previous executions in your code. variables, functions, imports are retained.
19
+ - Read and understand the output of the previous code snippet and use it to answer the user's request. Note that the code output is NOT visible to the user, so after the task is complete, you have to give the output to the user in a markdown format.
20
+ - If needed, feel free to ask for more information from the user (without using the `execute_ipython_cell` tool) to clarify the task.
21
+
22
+ GUIDELINES for writing code:
23
+ - Variables defined at the top level of previous code snippets can be referenced in your code.
24
+ - External functions which return a dict or list[dict] are ambiguous. Therefore, you MUST explore the structure of the returned data using `smart_print()` statements before using it, printing keys and values. `smart_print` truncates long strings from data, preventing huge output logs.
25
+ - When an operation involves running a fixed set of steps on a list of items, run one run correctly and then use a for loop to run the steps on each item in the list.
26
+ - In a single code snippet, try to achieve as much as possible.
27
+ - You can only import libraries that come pre-installed with Python. For external functions, use the search and load tools to access them in the code.
28
+ - For displaying final results to the user, you must present your output in markdown format, including image links, so that they are rendered and displayed to the user. The code output is NOT visible to the user.
29
+ - Call all functions using keyword arguments only, never positional arguments.
30
+ - Async Functions (Critical): Use them only as follows-
31
+ Case 1: Top-level await without asyncio.run()
32
+ Wrap in async function and call with asyncio.run():
33
+ async def main():
34
+ result = await some_async_function()
35
+ return result
36
+ asyncio.run(main())
37
+ Case 2: Using asyncio.run() directly
38
+ If code already contains asyncio.run(), use as-is do not wrap again:
39
+ asyncio.run(some_async_function())
40
+ Rules:
41
+ - Never use await outside an async function
42
+ - Never use await asyncio.run()
43
+ - Never nest asyncio.run() calls
108
44
  """
109
45
 
110
46
 
@@ -6,18 +6,19 @@ from langchain_core.tools import tool
6
6
  from universal_mcp.tools.registry import ToolRegistry
7
7
  from universal_mcp.types import ToolFormat
8
8
 
9
# Maximum number of tools listed per app by `search_functions` below.
# BUG FIX: the constant was defined as the misspelling `MAX_LENGHT` while the
# code that consumes it reads `MAX_LENGTH` (see the `len(app_tools[app]) <
# MAX_LENGTH` check) — that lookup would raise NameError at runtime.
MAX_LENGTH = 100
MAX_LENGHT = MAX_LENGTH  # backward-compatible alias for any code still using the old misspelling
10
+
10
11
 
11
12
def enter_playbook_mode() -> None:
    """Call this function to enter playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
    # Intentional no-op: this function exists only so the model can emit a
    # tool call that the agent graph intercepts to switch into playbook mode;
    # the docstring doubles as the tool description shown to the model.
    return
14
15
 
16
+
15
17
def exit_playbook_mode() -> None:
    """Call this function to exit playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
    # Intentional no-op: serves purely as a tool-call signal for leaving
    # playbook mode; the agent graph handles the actual state transition.
    return
18
20
 
19
21
 
20
-
21
22
  def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
22
23
  """Create the meta tools for searching and loading tools"""
23
24
 
@@ -46,7 +47,7 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
46
47
  for tool in tools_list:
47
48
  app = tool["id"].split("__")[0]
48
49
  tool_id = tool["id"]
49
-
50
+
50
51
  # Check if within limit and add to set (automatically deduplicates)
51
52
  if len(app_tools[app]) < MAX_LENGTH:
52
53
  cleaned_desc = tool["description"].split("Context:")[0].strip()
@@ -103,29 +104,30 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
103
104
  return f"Successfully loaded {len(tool_ids)} functions: {tool_ids}"
104
105
 
105
106
  @tool
106
- async def web_search(query: str) -> list:
107
- """Search the web for the given query and return structured search results.
107
+ async def web_search(query: str) -> dict:
108
+ """
109
+ Get an LLM answer to a question informed by Exa search results.
108
110
 
109
- Do not use for app-specific searches (for example, reddit or linkedin searches
111
+ This tool performs an Exa `/answer` request, which:
112
+ 1. Provides a **direct answer** for factual queries (e.g., "What is the capital of France?" → "Paris")
113
+ 2. Generates a **summary with citations** for open-ended questions
114
+ (e.g., "What is the state of AI in healthcare?" → A detailed summary with source links)
110
115
 
116
+ Args:
117
+ query (str): The question or topic to answer.
111
118
  Returns:
112
- list: A list of up to 10 search result dictionaries, each containing:
113
- - id (str): Unique identifier, typically the URL
114
- - title (str): The title/headline of the search result
115
- - url (str): The web URL of the result
116
- - publishedDate (str): ISO 8601 formatted date (e.g., "2025-01-01T00:00:00.000Z")
117
- - author (str): Author name (may be empty string)
118
- - summary (str): Text summary/snippet of the content
119
- - image (str): URL to associated image (if available)
120
-
121
- Example:
122
- results = await web_search(query="python programming")
119
+ dict: A structured response containing only:
120
+ - answer (str): Generated answer
121
+ - citations (list[dict]): List of cited sources
123
122
  """
124
- await tool_registry.export_tools(["exa__search_with_filters"], ToolFormat.LANGCHAIN)
125
- response = await tool_registry.call_tool(
126
- "exa__search_with_filters", {"query": query, "contents": {"summary": True}}
127
- )
128
- return response["results"]
123
+ await tool_registry.export_tools(["exa__answer"], ToolFormat.LANGCHAIN)
124
+ response = await tool_registry.call_tool("exa__answer", {"query": query, "text": True})
125
+
126
+ # Extract only desired fields
127
+ return {
128
+ "answer": response.get("answer"),
129
+ "citations": response.get("citations", []),
130
+ }
129
131
 
130
132
  return {"search_functions": search_functions, "load_functions": load_functions, "web_search": web_search}
131
133
 
@@ -6,7 +6,7 @@ from typing import Any
6
6
 
7
7
  from langchain_core.messages import BaseMessage
8
8
 
9
- MAX_CHARS = 700
9
+ MAX_CHARS = 5000
10
10
 
11
11
 
12
12
  def light_copy(data):
@@ -0,0 +1,90 @@
1
+ import contextlib
2
+ import inspect
3
+ import io
4
+ import queue
5
+ import re
6
+ import socket
7
+ import threading
8
+ import types
9
+ from typing import Any
10
+
11
+ from universal_mcp.agents.codeact0.utils import derive_context
12
+
13
+
14
class Sandbox:
    """
    A class to execute code safely in a sandboxed environment with a timeout.
    """

    def __init__(self, timeout: int = 180):
        """
        Initializes the Sandbox.

        Args:
            timeout: The timeout for code execution in seconds.
        """
        self.timeout = timeout
        # Notebook-style namespace: top-level variables persist across run() calls.
        self._locals: dict[str, Any] = {}
        # Context derived from executed code (kept across calls; best-effort).
        self.add_context: dict[str, Any] = {}

    def run(self, code: str) -> tuple[str, dict[str, Any], dict[str, Any]]:
        """
        Execute code safely with a timeout.

        - Returns (output_str, filtered_locals_dict, new_add_context)
        - Errors or timeout are returned as output_str.
        - Previous variables in _locals persist across calls.
        """
        # Types that should not be persisted between runs (unpicklable /
        # stateful OS or threading handles).
        EXCLUDE_TYPES = (
            types.ModuleType,
            type(re.match("", "")),
            type(threading.Lock()),
            type(threading.RLock()),
            threading.Event,
            threading.Condition,
            threading.Semaphore,
            queue.Queue,
            socket.socket,
            io.IOBase,
        )

        result_container = {"output": "<no output>"}

        def target():
            try:
                with contextlib.redirect_stdout(io.StringIO()) as f:
                    exec(code, self._locals, self._locals)
                result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
            except Exception as e:
                result_container["output"] = "Error during execution: " + str(e)

        # BUG FIX: run the worker as a daemon thread. The original non-daemon
        # thread would keep the interpreter alive indefinitely if the executed
        # code outlived the timeout (join() returns but the thread keeps going).
        thread = threading.Thread(target=target, daemon=True)
        thread.start()
        thread.join(self.timeout)

        # BUG FIX: decide the final output exactly once here. The worker may
        # finish *after* the timed-out join and overwrite result_container, so
        # re-reading the container later could silently replace the timeout
        # message with late output (a small race in the original).
        if thread.is_alive():
            output = f"Code timeout: code execution exceeded {self.timeout} seconds."
        else:
            output = result_container["output"]

        # Filter locals for picklable/storable variables
        all_vars = {}
        for key, value in self._locals.items():
            if key == "__builtins__":
                continue
            if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
                continue
            if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
                continue
            if isinstance(value, EXCLUDE_TYPES):
                continue
            # Keep plain data; drop named callables (functions/classes), which
            # cannot be meaningfully stored between executions.
            if not callable(value) or not hasattr(value, "__name__"):
                all_vars[key] = value

        self._locals = all_vars

        # Safely derive context
        try:
            self.add_context = derive_context(code, self.add_context)
        except Exception:
            # Keep the old context if derivation fails
            pass

        return output, self._locals, self.add_context
@@ -0,0 +1,45 @@
1
+ # Unified Agent
2
+
3
+ The Unified Agent is a sophisticated AI assistant designed to understand and execute tasks by writing and running Python code. It operates within a secure sandbox environment and can leverage a variety of tools to interact with external systems and perform complex operations. A key feature of the Unified Agent is its ability to create reusable "playbooks" from user workflows, enabling automation of repeated tasks.
4
+
5
+ ## Architecture
6
+
7
+ The agent's architecture is built upon the LangGraph library, creating a state machine that cycles between thinking (calling a language model) and acting (executing code or tools).
8
+
9
+ ### Core Components:
10
+
11
+ * **`UnifiedAgent`**: The fundamental agent implementation. It processes user requests, writes Python code, and executes it in a sandbox to achieve the desired outcome. It also has a "playbook mode" to generate reusable Python functions from a user's workflow.
12
+ * **State Graph (`CodeActState`)**: The agent's logic is defined as a state graph. The primary nodes are:
13
+ * `call_model`: Invokes the language model to generate Python code or select a tool based on the current state and user input.
14
+ * `sandbox`: Executes the generated Python code using a safe `eval` function with a timeout. The results and any errors are captured and fed back into the state.
15
+ * `execute_tools`: Handles the execution of meta-tools for searching, loading, and interacting with external functions.
16
+ * `playbook`: Manages the playbook creation process, including planning, user confirmation, and code generation.
17
+ * **Sandbox (`sandbox.py`)**: A secure execution environment that runs Python code in a separate thread with a timeout. It ensures that the agent's code execution is isolated and cannot harm the host system.
18
+ * **Tools**: The agent has access to a set of powerful tools:
19
+ * `execute_ipython_cell`: The primary tool for executing arbitrary Python code snippets.
20
+ * **AI Functions (`llm_tool.py`)**: A suite of functions (`generate_text`, `classify_data`, `extract_data`, `call_llm`) that allow the agent to delegate complex reasoning, classification, and data extraction tasks to a language model.
21
+ * **Meta Tools (`tools.py`)**: Functions like `search_functions` and `load_functions` that enable the agent to dynamically discover and load new tools from a `ToolRegistry`.
22
+
23
+ ## Playbook Mode
24
+
25
+ A key feature of the Unified Agent is its ability to create reusable "playbooks". When a user performs a task that they might want to repeat in the future, they can trigger the playbook mode. The agent will then:
26
+
27
+ 1. **Plan:** Analyze the workflow and create a step-by-step plan for a reusable function, identifying user-specific variables that should become function parameters.
28
+ 2. **Confirm:** Ask the user for confirmation of the generated plan.
29
+ 3. **Generate:** Generate a Python function based on the confirmed plan. This function can be saved and executed later to automate the task.
30
+
31
+ ## Getting Started (`__main__.py`)
32
+
33
+ The `__main__.py` file serves as a simple command-line interface for interacting with the agent. It demonstrates how to instantiate the `UnifiedAgent`, configure it with tools, and invoke it with a user request. This allows for easy testing and experimentation with the agent's capabilities.
34
+
35
+ To run the agent, execute the following command from the root of the repository:
36
+ ```bash
37
+ uv run python -m src.universal_mcp.agents.unified.__main__
38
+ ```
39
+
40
+ Major TODO:
41
- [ ] Improve LLM Tools
42
- [ ] Use smaller dedicated models for universal_write, classify, etc.
43
- [ ] Improve Sandbox
44
- [ ] Support saving/loading of context
45
- [ ] Direct async tool support
@@ -0,0 +1,3 @@
1
"""Public entry point of the unified agent package: re-exports UnifiedAgent."""

from .agent import UnifiedAgent

__all__ = ["UnifiedAgent"]