universal-mcp-agents 0.1.11 → 0.1.13 (py3-none-any.whl)

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only.
Files changed (64)
  1. universal_mcp/agents/__init__.py +17 -19
  2. universal_mcp/agents/base.py +10 -7
  3. universal_mcp/agents/{bigtoolcache → bigtool}/__init__.py +2 -2
  4. universal_mcp/agents/{bigtoolcache → bigtool}/__main__.py +0 -1
  5. universal_mcp/agents/{bigtoolcache → bigtool}/agent.py +0 -1
  6. universal_mcp/agents/{bigtoolcache → bigtool}/graph.py +6 -5
  7. universal_mcp/agents/builder/__main__.py +125 -0
  8. universal_mcp/agents/builder/builder.py +225 -0
  9. universal_mcp/agents/builder/prompts.py +173 -0
  10. universal_mcp/agents/builder/state.py +24 -0
  11. universal_mcp/agents/cli.py +3 -2
  12. universal_mcp/agents/codeact/__main__.py +2 -4
  13. universal_mcp/agents/codeact/agent.py +166 -64
  14. universal_mcp/agents/codeact/models.py +11 -0
  15. universal_mcp/agents/codeact/prompts.py +12 -12
  16. universal_mcp/agents/codeact/sandbox.py +69 -23
  17. universal_mcp/agents/codeact/state.py +2 -0
  18. universal_mcp/agents/codeact0/__init__.py +3 -0
  19. universal_mcp/agents/codeact0/__main__.py +35 -0
  20. universal_mcp/agents/codeact0/agent.py +136 -0
  21. universal_mcp/agents/codeact0/config.py +77 -0
  22. universal_mcp/agents/codeact0/llm_tool.py +379 -0
  23. universal_mcp/agents/codeact0/prompts.py +156 -0
  24. universal_mcp/agents/codeact0/sandbox.py +90 -0
  25. universal_mcp/agents/codeact0/state.py +12 -0
  26. universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +4 -0
  27. universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +10 -0
  28. universal_mcp/agents/codeact0/usecases/11-github.yaml +13 -0
  29. universal_mcp/agents/codeact0/usecases/2-reddit.yaml +27 -0
  30. universal_mcp/agents/codeact0/usecases/2.1-instructions.md +81 -0
  31. universal_mcp/agents/codeact0/usecases/2.2-instructions.md +71 -0
  32. universal_mcp/agents/codeact0/usecases/3-earnings.yaml +4 -0
  33. universal_mcp/agents/codeact0/usecases/4-maps.yaml +41 -0
  34. universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +8 -0
  35. universal_mcp/agents/codeact0/usecases/6-contract.yaml +6 -0
  36. universal_mcp/agents/codeact0/usecases/7-overnight.yaml +14 -0
  37. universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +25 -0
  38. universal_mcp/agents/codeact0/usecases/9-learning.yaml +9 -0
  39. universal_mcp/agents/codeact0/utils.py +374 -0
  40. universal_mcp/agents/hil.py +4 -4
  41. universal_mcp/agents/planner/__init__.py +7 -1
  42. universal_mcp/agents/react.py +11 -3
  43. universal_mcp/agents/shared/tool_node.py +1 -34
  44. universal_mcp/agents/simple.py +12 -2
  45. universal_mcp/agents/utils.py +17 -0
  46. universal_mcp/applications/llm/__init__.py +3 -0
  47. universal_mcp/applications/llm/app.py +158 -0
  48. universal_mcp/applications/ui/app.py +118 -144
  49. {universal_mcp_agents-0.1.11.dist-info → universal_mcp_agents-0.1.13.dist-info}/METADATA +1 -1
  50. universal_mcp_agents-0.1.13.dist-info/RECORD +63 -0
  51. universal_mcp/agents/bigtool2/__init__.py +0 -67
  52. universal_mcp/agents/bigtool2/__main__.py +0 -23
  53. universal_mcp/agents/bigtool2/agent.py +0 -13
  54. universal_mcp/agents/bigtool2/graph.py +0 -155
  55. universal_mcp/agents/bigtool2/meta_tools.py +0 -120
  56. universal_mcp/agents/bigtool2/prompts.py +0 -15
  57. universal_mcp/agents/bigtoolcache/state.py +0 -27
  58. universal_mcp/agents/builder.py +0 -204
  59. universal_mcp_agents-0.1.11.dist-info/RECORD +0 -42
  60. /universal_mcp/agents/{bigtoolcache → bigtool}/context.py +0 -0
  61. /universal_mcp/agents/{bigtoolcache → bigtool}/prompts.py +0 -0
  62. /universal_mcp/agents/{bigtool2 → bigtool}/state.py +0 -0
  63. /universal_mcp/agents/{bigtoolcache → bigtool}/tools.py +0 -0
  64. {universal_mcp_agents-0.1.11.dist-info → universal_mcp_agents-0.1.13.dist-info}/WHEEL +0 -0
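The renames above fold the `bigtoolcache` package (plus `bigtool2`'s state module) into a single `universal_mcp/agents/bigtool` package. A minimal, hypothetical sketch of the import update any downstream code would need; the actual consumers are not shown in this diff:

```python
# In 0.1.11 this package was importable as `universal_mcp.agents.bigtoolcache`;
# in 0.1.13 the consolidated package is imported under its new name instead.
from universal_mcp.agents import bigtool
```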
@@ -0,0 +1,173 @@
+ AGENT_BUILDER_INSTRUCTIONS = r"""
+ You are a specialized Agent Generation AI, tasked with creating intelligent, effective, and context-aware AI agents based on user requests.
+
+ When given a user's request, immediately follow this structured process:
+
+ # 1. Intent Breakdown
+ - Clearly identify the primary goal the user wants the agent to achieve.
+ - Recognize any special requirements, constraints, formatting requests, or interaction rules.
+ - Summarize your understanding briefly to ensure alignment with user intent.
+
+ # 2. Agent Profile Definition
+ - **Name (2-4 words)**: Concise, clear, and memorable name reflecting core functionality.
+ - **Description (1-2 sentences)**: Captures the unique value and primary benefit to users.
+ - **Expertise**: Precise domain-specific expertise area. Avoid vague or overly general titles.
+ - **Instructions**: Compose detailed, highly actionable system instructions that directly command the agent's behavior. Respond in markdown as this text will be rendered in a rich text editor. Write instructions as clear imperatives, without preamble, assuming the agent identity is already established externally.
+ - **Schedule**: If the user specifies a schedule, you should also provide a cron expression for the agent to run on. The schedule should be in a proper cron expression and nothing more. Do not respond with any other information or explain your reasoning for the schedule, otherwise this will cause a parsing error that is undesirable.
+
+ ## ROLE & RESPONSIBILITY
+ - Clearly state the agent's primary mission, e.g., "Your primary mission is...", "Your core responsibility is...".
+ - Outline the exact tasks it handles, specifying expected input/output clearly.
+
+ ## INTERACTION STYLE
+ - Define exactly how to communicate with users: tone, format, response structure.
+ - Include explicit commands, e.g., "Always wrap responses in \`\`\`text\`\`\` blocks.", "Never add greetings or meta-information.", "Always provide outputs in user's requested languages."
+
+ ## OUTPUT FORMATTING RULES
+ - Clearly specify formatting standards required by the user (e.g., JSON, plain text, markdown).
+ - Include explicit examples to illustrate correct formatting.
+
+ ## LIMITATIONS & CONSTRAINTS
+ - Explicitly define boundaries of the agent's capabilities.
+ - Clearly state what the agent must never do or say.
+ - Include exact phrases for declining requests outside scope.
+
+ ## REAL-WORLD EXAMPLES
+ Provide two explicit interaction examples showing:
+ - User's typical request.
+ - Final agent response demonstrating perfect compliance.
+
+ Create an agent that feels thoughtfully designed, intelligent, and professionally reliable, perfectly matched to the user's original intent.
+ """
+
+
+ TASK_SYNTHESIS_PROMPT = r"""
+ # ROLE & GOAL
+ You are a 'Task Synthesizer' AI. Your sole purpose is to combine an original user task and a subsequent modification request into a single, complete, and coherent new task. This new task must be a standalone instruction that accurately reflects the user's final intent and can be used to configure a new AI agent from scratch.
+
+ # CORE PRINCIPLES
+ 1. **Preserve All Details:** You must retain all specific, unmodified details from the original task (e.g., email addresses, subjects, search queries, file names).
+ 2. **Seamless Integration:** The user's modification must be integrated perfectly into the original task's context, replacing or adding information as required.
+ 3. **Clarity and Directness:** The final task should be a direct command, phrased as if it were the user's very first request.
+ 4. **Strict Output Format:** Your output MUST BE ONLY the new synthesized task string. Do not include any preamble, explanation, or quotation marks.
+
+ ---
+ # EXAMPLES
+
+ **EXAMPLE 1: Changing the application for an email task**
+
+ **Original Task:**
+ "Send an email to manoj@agentr.dev with the subject 'Hello' and body 'This is a test of the Gmail agent.' from my Gmail account"
+
+ **Modification Request:**
+ "Please use my Outlook account for this instead of Gmail."
+
+ **New Synthesized Task:**
+ Send an email to manoj@agentr.dev with the subject 'Hello' and body 'This is a test of the Outlook agent.' from my Outlook account
+
+ ---
+ **EXAMPLE 2: Modifying the scope and source for a calendar task**
+
+ **Original Task:**
+ "Show me events from today's Google Calendar"
+
+ **Modification Request:**
+ "Actually, I need to see the whole week, not just today. And can you check my Microsoft 365 calendar?"
+
+ **New Synthesized Task:**
+ Show me events for the whole week from my Microsoft 365 calendar
+
+ ---
+ **EXAMPLE 3: Changing the target and tool for a web search task**
+
+ **Original Task:**
+ "Find the best restaurants in Goa using exa web search"
+
+ **Modification Request:**
+ "Could you look for hotels instead of restaurants, and please use Perplexity for it."
+
+ **New Synthesized Task:**
+ Find the best hotels in Goa using Perplexity.
+
+ ---
+ **EXAMPLE 4: Altering the final action of a multi-step task**
+
+ **Original Task:**
+ "search reddit for posts on elon musk and then post a meme on him on linkedin"
+
+ **Modification Request:**
+ "Let's not post anything. Just find the posts and then summarize the key points into a text file for me."
+
+ **New Synthesized Task:**
+ search reddit for posts on elon musk and then summarize the key points into a text file
+
+ ---
+ # YOUR TASK
+
+ Now, perform this synthesis for the following inputs.
+
+ **Original Task:**
+ {original_task}
+
+ **Modification Request:**
+ {modification_request}
+
+ **New Synthesized Task:**
+ """
+
+ AGENT_FROM_CONVERSATION_PROMPT = r"""
+ # ROLE & GOAL
+ You are a highly intelligent 'Agent Analyst' AI. Your sole purpose is to analyze a raw conversation transcript between a user and an AI assistant, together with a definitive list of the tools the assistant used. From this data, you must synthesize a complete, reusable AI agent profile.
+
+ # INPUTS
+ 1. **Conversation History:** A transcript of the dialogue.
+ 2. **Used Tools:** A definitive list of tool configurations (`{{app_id: [tool_names]}}`) that were successfully used to fulfill the user's requests in the conversation.
+
+ # 1. Intent Breakdown
+ - Clearly identify the primary goal the user wants the agent to achieve.
+ - Recognize any special requirements, constraints, formatting requests, or interaction rules.
+ - Summarize your understanding briefly to ensure alignment with user intent.
+
+ # 2. Agent Profile Definition
+ - **Name (2-4 words)**: Concise, clear, and memorable name reflecting core functionality.
+ - **Description (1-2 sentences)**: Captures the unique value and primary benefit to users.
+ - **Expertise**: Precise domain-specific expertise area. Avoid vague or overly general titles.
+ - **Instructions**: Compose detailed, highly actionable system instructions that directly command the agent's behavior. Respond in markdown as this text will be rendered in a rich text editor. Write instructions as clear imperatives, without preamble, assuming the agent identity is already established externally.
+ - **Schedule**: If the user specifies a schedule, you should also provide a cron expression for the agent to run on. The schedule should be in a proper cron expression and nothing more. Do not respond with any other information or explain your reasoning for the schedule, otherwise this will cause a parsing error that is undesirable.
+
+ ## ROLE & RESPONSIBILITY
+ - Clearly state the agent's primary mission, e.g., "Your primary mission is...", "Your core responsibility is...".
+ - Outline the exact tasks it handles, specifying expected input/output clearly.
+
+ ## INTERACTION STYLE
+ - Define exactly how to communicate with users: tone, format, response structure.
+ - Include explicit commands, e.g., "Always wrap responses in \`\`\`text\`\`\` blocks.", "Never add greetings or meta-information.", "Always provide outputs in user's requested languages."
+
+ ## OUTPUT FORMATTING RULES
+ - Clearly specify formatting standards required by the user (e.g., JSON, plain text, markdown).
+ - Include explicit examples to illustrate correct formatting.
+
+ ## LIMITATIONS & CONSTRAINTS
+ - Explicitly define boundaries of the agent's capabilities.
+ - Clearly state what the agent must never do or say.
+ - Include exact phrases for declining requests outside scope.
+
+ ## REAL-WORLD EXAMPLES
+ Provide two explicit interaction examples showing:
+ - User's typical request.
+ - Final agent response demonstrating perfect compliance.
+
+ Create an agent that feels thoughtfully designed, intelligent, and professionally reliable, perfectly matched to the user's original intent.
+
+ # YOUR TASK
+
+ Now, perform this analysis for the following inputs.
+
+ **INPUT - Conversation History:**
+ {conversation_history}
+
+ **INPUT - Used Tools:**
+ {tool_config}
+
+ **YOUR JSON OUTPUT:**
+ """
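The prompts above carry `str.format`-style placeholders (`{original_task}`, `{modification_request}`, `{conversation_history}`, `{tool_config}`; literal braces are escaped as `{{...}}`). A minimal sketch of how a caller might render one of them; the builder's actual invocation lives in `builder.py` and is not shown in this hunk, so treat this as illustrative only:

```python
from universal_mcp.agents.builder.prompts import TASK_SYNTHESIS_PROMPT

# Hypothetical usage: fill the placeholders before sending the prompt to a model.
prompt = TASK_SYNTHESIS_PROMPT.format(
    original_task="Send an email to manoj@agentr.dev from my Gmail account",
    modification_request="Use my Outlook account instead",
)
print(prompt)
```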
@@ -0,0 +1,24 @@
+ from collections.abc import Sequence
+ from typing import Annotated, TypedDict
+
+ from langchain_core.messages import BaseMessage
+ from langgraph.graph.message import add_messages
+ from pydantic import BaseModel, Field
+ from universal_mcp.types import ToolConfig
+
+
+ class Agent(BaseModel):
+     """Agent that can be created by the builder."""
+
+     name: str = Field(description="Name of the agent.")
+     description: str = Field(description="A small description of the agent.")
+     expertise: str = Field(description="The expertise of the agent.")
+     instructions: str = Field(description="The instructions for the agent to follow.")
+     schedule: str | None = Field(description="The cron expression for the agent to run on.", default=None)
+
+
+ class BuilderState(TypedDict):
+     user_task: str | None
+     generated_agent: Agent | None
+     tool_config: ToolConfig | None
+     messages: Annotated[Sequence[BaseMessage], add_messages]
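A small sketch of how the `Agent` model and `BuilderState` defined above fit together; the field values below are purely illustrative, not taken from the package:

```python
from universal_mcp.agents.builder.state import Agent, BuilderState

# Illustrative values; a real run would populate these from the builder graph.
agent = Agent(
    name="Inbox Unsubscriber",
    description="Finds unsubscribe links in promotional email.",
    expertise="Email automation",
    instructions="## ROLE & RESPONSIBILITY\nYour primary mission is ...",
    schedule="0 9 * * 1",  # optional cron expression, as described in the prompt
)

state: BuilderState = {
    "user_task": "Unsubscribe me from promo emails",
    "generated_agent": agent,
    "tool_config": None,
    "messages": [],
}
```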
@@ -1,3 +1,5 @@
+ import asyncio
+
  from langgraph.checkpoint.memory import MemorySaver
  from typer import Typer
  from universal_mcp.agentr.client import AgentrClient
@@ -18,7 +20,6 @@ app = Typer()
  )
  def run(name: str = "react"):
      """Run the agent CLI"""
-     import asyncio

      setup_logger(log_file=None, level="ERROR")
      client = AgentrClient()
@@ -32,7 +33,7 @@ def run(name: str = "react"):
          },
      }
      agent_cls = get_agent(name)
-     agent = agent_cls(name, **params)
+     agent = agent_cls(name=name, **params)
      asyncio.run(agent.run_interactive())
@@ -11,7 +11,7 @@ from universal_mcp.agents.utils import messages_to_list
  async def main():
      memory = MemorySaver()
      agent = CodeActAgent(
-         "CodeAct Agent",
+         name="CodeAct Agent",
          instructions="Be very concise in your answers.",
          model="anthropic:claude-4-sonnet-20250514",
          tools={"google_mail": ["list_messages"]},
@@ -25,9 +25,7 @@ async def main():
      # user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
      # ):
      # print(event.content, end="")
-     result = await agent.invoke(
-         user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
-     )
+     result = await agent.invoke(user_input="Get the 50th fibonacci number")
      print(messages_to_list(result["messages"]))
@@ -1,25 +1,34 @@
+ import ast
  from collections.abc import Callable

- from langchain_core.messages import AIMessageChunk
- from langchain_core.tools import StructuredTool
- from langchain_core.tools import tool as create_tool
+ from langchain_core.messages import AIMessage, HumanMessage
  from langgraph.checkpoint.base import BaseCheckpointSaver
- from langgraph.graph import END, StateGraph
+ from langgraph.graph import END, START, StateGraph
+ from langgraph.types import Command
+ from pydantic import BaseModel, Field
  from universal_mcp.logger import logger
  from universal_mcp.tools.registry import ToolRegistry
  from universal_mcp.types import ToolConfig, ToolFormat

  from universal_mcp.agents.base import BaseAgent
+ from universal_mcp.agents.codeact.models import SandboxOutput
  from universal_mcp.agents.codeact.prompts import (
      create_default_prompt,
      make_safe_function_name,
  )
  from universal_mcp.agents.codeact.sandbox import eval_unsafe
  from universal_mcp.agents.codeact.state import CodeActState
- from universal_mcp.agents.codeact.utils import extract_and_combine_codeblocks
  from universal_mcp.agents.llm import load_chat_model


+ class StructuredCodeResponse(BaseModel):
+     """Structured response for the CodeAct agent."""
+
+     reasoning: str = Field(..., description="The reasoning behind the generated script.")
+     script: str | None = Field(default=None, description="The Python script to be executed.")
+     task_complete: bool = Field(..., description="Whether the task is complete.")
+
+
  class CodeActAgent(BaseAgent):
      def __init__(
          self,
@@ -32,66 +41,58 @@ class CodeActAgent(BaseAgent):
          sandbox_timeout: int = 20,
          **kwargs,
      ):
-         super().__init__(name, instructions, model, memory, **kwargs)
-         self.model_instance = load_chat_model(model, thinking=False)
+         super().__init__(
+             name=name,
+             instructions=instructions,
+             model=model,
+             memory=memory,
+             **kwargs,
+         )
+         self.model_instance = load_chat_model(model)
          self.tools_config = tools or {}
          self.registry = registry
          self.eval_fn = eval_unsafe
          self.sandbox_timeout = sandbox_timeout
-         self.processed_tools: list[StructuredTool | Callable] = []
+         self.processed_tools: dict[str, Callable] = {}

      async def _build_graph(self):
          if self.tools_config:
              if not self.registry:
                  raise ValueError("Tools are configured but no registry is provided")
-             # Langchain tools are fine
-             exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
-             self.processed_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in exported_tools]
+             # Load native tools, these are python functions
+             exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.NATIVE)
+             for tool in exported_tools:
+                 name = tool.__name__
+                 safe_name = make_safe_function_name(name)
+                 if name != safe_name:
+                     logger.warning(f"Tool name {name} is not safe, using {safe_name} instead")
+                     raise ValueError(f"Tool name {name} is not safe, using {safe_name} instead")
+                 self.processed_tools[safe_name] = tool

          self.instructions = create_default_prompt(self.processed_tools, self.instructions)

          agent = StateGraph(CodeActState)
          agent.add_node("call_model", self.call_model)
+         agent.add_node("validate_code", self.validate_code)
          agent.add_node("sandbox", self.sandbox)
+         agent.add_node("final_answer", self.final_answer)

-         agent.set_entry_point("call_model")
-         agent.add_conditional_edges(
-             "call_model",
-             self.should_run_sandbox,
-             {
-                 "sandbox": "sandbox",
-                 END: END,
-             },
-         )
-         agent.add_edge("sandbox", "call_model")
-         return agent.compile(checkpointer=self.memory)
-
-     def should_run_sandbox(self, state: CodeActState) -> str:
-         last_message = state["messages"][-1]
-         if isinstance(last_message.content, str) and "TASK_COMPLETE" in last_message.content:
-             return END
-
-         if state.get("script"):
-             return "sandbox"
-         return END
+         agent.add_edge(START, "call_model")

-     def _extract_content(self, response: AIMessageChunk) -> str:
-         if isinstance(response.content, list):
-             content = " ".join([c.get("text", "") for c in response.content])
-         else:
-             content = response.content
-         return content
+         return agent.compile(checkpointer=self.memory)

-     async def call_model(self, state: CodeActState) -> dict:
+     async def call_model(self, state: CodeActState) -> Command:
          logger.debug(f"Calling model with state: {state}")
-         model = self.model_instance
+         model = self.model_instance.with_structured_output(StructuredCodeResponse)

          # Find the last script and its output in the message history
          previous_script = state.get("script", "")
          sandbox_output = state.get("sandbox_output", "")
+         syntax_error = state.get("syntax_error", "")

-         logger.debug(f"Previous script: {previous_script}")
-         logger.debug(f"Sandbox output: {sandbox_output}")
+         logger.debug(f"Previous script:\n {previous_script}")
+         logger.debug(f"Sandbox output:\n {sandbox_output}")
+         logger.debug(f"Syntax error:\n {syntax_error}")

          prompt_messages = [
              {"role": "system", "content": self.instructions},
@@ -101,38 +102,139 @@ class CodeActAgent(BaseAgent):
              feedback_message = (
                  f"Here is the script you generated in the last turn:\n\n```python\n{previous_script}\n```\n\n"
              )
-             if sandbox_output:
+             if syntax_error:
+                 feedback_message += (
+                     f"When parsing the script, it produced the following syntax error:\n\n```\n{syntax_error}\n```\n\n"
+                     "Please fix the syntax and generate a new, correct script."
+                 )
+             elif sandbox_output:
                  feedback_message += (
                      f"When executed, it produced the following output:\n\n```\n{sandbox_output}\n```\n\n"
                  )
-             feedback_message += "Based on this, please generate a new, improved script to continue the task. Remember to replace the old script entirely."
+             feedback_message += "Based on this output, decide if the task is complete. If it is, respond the final answer to the user in clean and readable Markdown format. Important: set `task_complete` to `True` and no need to provide script. If the task is not complete, generate a new script to get closer to the solution."
+
              prompt_messages.append({"role": "user", "content": feedback_message})

-         logger.debug(f"Prompt messages: {prompt_messages}")
+         response: StructuredCodeResponse = await model.ainvoke(prompt_messages)
+
+         # We add the reasoning as the AI message content
+         ai_message = AIMessage(content=response.reasoning)
+
+         if response.task_complete:
+             return Command(
+                 goto="final_answer",
+                 update={
+                     "messages": [ai_message],
+                     "script": response.script,
+                     "task_complete": response.task_complete,
+                     "sandbox_output": sandbox_output,
+                     "syntax_error": None,
+                 },
+             )
+         else:
+             return Command(
+                 goto="validate_code",
+                 update={
+                     "messages": [ai_message],
+                     "script": response.script,
+                     "task_complete": response.task_complete,
+                     "sandbox_output": None,
+                     "syntax_error": None,
+                 },
+             )

-         response = await model.ainvoke(prompt_messages)
-         logger.debug(f"Model response: {response}")
+     async def validate_code(self, state: CodeActState) -> Command:
+         logger.debug(f"Validating code with script:\n {state['script']}")
+         script = state.get("script")

-         text_content = self._extract_content(response)
-         if not isinstance(text_content, str):
-             raise ValueError(f"Content is not a string: {text_content}")
-         code = extract_and_combine_codeblocks(text_content)
-         logger.debug(f"Extracted code: {code}")
+         if not script:
+             return Command(
+                 goto="call_model",
+                 update={
+                     "syntax_error": "Model did not provide a script but task is not complete. Please provide a script or set task_complete to True."
+                 },
+             )

-         return {"messages": [response], "script": code}
+         try:
+             ast.parse(script)
+             logger.debug("AST parsing successful.")
+             return Command(
+                 goto="sandbox",
+                 update={
+                     "syntax_error": None,
+                 },
+             )
+         except SyntaxError as e:
+             logger.warning(f"AST parsing failed: {e}")
+             return Command(
+                 goto="call_model",
+                 update={
+                     "syntax_error": f"Syntax Error: {e}",
+                 },
+             )

-     async def sandbox(self, state: CodeActState) -> dict:
-         logger.debug(f"Running sandbox with state: {state}")
+     async def sandbox(self, state: CodeActState) -> Command:
+         logger.debug(f"Running sandbox with script:\n {state['script']}")
          tools_context = {}
-         for tool in self.processed_tools:
-             safe_name = make_safe_function_name(tool.name)
-             tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
-             tools_context[safe_name] = tool_callable
+         for tool_name, tool_callable in self.processed_tools.items():
+             tools_context[tool_name] = tool_callable

+         output: SandboxOutput
          output, _ = await self.eval_fn(state["script"], tools_context, self.sandbox_timeout)
-         logger.debug(f"Sandbox output: {output}")
-         return {
-             "messages": [AIMessageChunk(content=output.strip())],
-             "script": None,
-             "sandbox_output": output.strip(),
-         }
+
+         # Format the output for the agent
+         formatted_output = "Code executed.\n\n"
+         MAX_OUTPUT_LEN = 20000  # Maximum number of characters to show for stdout/stderr
+
+         def truncate_output(text, max_len=MAX_OUTPUT_LEN):
+             if text is None:
+                 return ""
+             text = text.strip()
+             if len(text) > max_len:
+                 return text[:max_len] + "\n... (more output hidden)"
+             return text
+
+         if output.stdout:
+             truncated_stdout = truncate_output(output.stdout)
+             formatted_output += f"STDOUT:\n```\n{truncated_stdout}\n```\n\n"
+         if output.error:
+             truncated_stderr = truncate_output(output.error)
+             formatted_output += f"STDERR / ERROR:\n```\n{truncated_stderr}\n```\n"
+         if output.return_value is not None:
+             formatted_output += f"RETURN VALUE:\n```\n{repr(output.return_value)}\n```\n"
+
+         logger.debug(f"Sandbox output: {formatted_output}")
+         return Command(
+             goto="call_model",
+             update={"sandbox_output": formatted_output.strip()},
+         )
+
+     async def final_answer(self, state: CodeActState) -> Command:
+         logger.debug("Formatting final answer using LLM for markdown formatting.")
+
+         # Extract the original user prompt
+         user_prompt = ""
+         for msg in state["messages"]:
+             if isinstance(msg, HumanMessage):
+                 user_prompt = msg.content
+                 break
+
+         # Compose a prompt for the LLM to generate a concise, markdown-formatted answer
+         llm_prompt = (
+             "Given the following task and answer, write a concise, well-formatted markdown response suitable for a user.\n\n"
+             f"Task:\n{user_prompt}\n\n"
+             f"Answer:\n{state['sandbox_output']}\n\n"
+             "Respond only with the markdown-formatted answer."
+         )
+
+         # Use the model to generate the final formatted answer
+         response = await self.model_instance.ainvoke([{"role": "user", "content": llm_prompt}])
+         markdown_answer = response.content if hasattr(response, "content") else str(response)
+         logger.debug(f"Final answer:\n {markdown_answer}")
+
+         return Command(
+             goto=END,
+             update={
+                 "messages": [AIMessage(content=markdown_answer)],
+             },
+         )
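For orientation, here is a minimal standalone sketch (not the package's code) of the `Command`-based routing pattern the rewritten graph uses: each node returns `Command(goto=..., update=...)`, so the only static edges are `START -> call_model` and the edge out of the terminal node. Node and field names mirror the diff; the state schema and node bodies are stand-ins.

```python
import ast
from typing import Literal, TypedDict

from langgraph.graph import END, START, StateGraph
from langgraph.types import Command


class State(TypedDict):
    script: str | None
    syntax_error: str | None


def call_model(state: State) -> Command[Literal["validate_code"]]:
    # In the real agent this calls the LLM with structured output.
    return Command(goto="validate_code", update={"script": "print('hello')"})


def validate_code(state: State) -> Command[Literal["call_model", "final_answer"]]:
    # Route back to the model on a syntax error, otherwise move on.
    try:
        ast.parse(state["script"] or "")
        return Command(goto="final_answer", update={"syntax_error": None})
    except SyntaxError as exc:
        return Command(goto="call_model", update={"syntax_error": str(exc)})


def final_answer(state: State) -> dict:
    # Terminal node; the real agent formats a Markdown answer here.
    return {}


builder = StateGraph(State)
builder.add_node("call_model", call_model)
builder.add_node("validate_code", validate_code)
builder.add_node("final_answer", final_answer)
builder.add_edge(START, "call_model")
builder.add_edge("final_answer", END)
graph = builder.compile()
```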
@@ -0,0 +1,11 @@
+ from typing import Any
+
+ from pydantic import BaseModel
+
+
+ class SandboxOutput(BaseModel):
+     """Structured output from the code sandbox."""
+
+     stdout: str
+     error: str | None = None
+     return_value: Any | None = None
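A tiny illustration of the role `SandboxOutput` plays: the sandbox packs captured stdout, any error text, and an optional return value into one object, which `CodeActAgent.sandbox()` then formats for the model. The values below are made up.

```python
from universal_mcp.agents.codeact.models import SandboxOutput

# Made-up result of executing a generated script.
output = SandboxOutput(stdout="type: list, first item: {...}", error=None, return_value=42)

if output.error:
    print(f"STDERR / ERROR:\n{output.error}")
else:
    print(f"STDOUT:\n{output.stdout}\nRETURN VALUE: {output.return_value!r}")
```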
@@ -1,8 +1,6 @@
  import inspect
  import re
- from collections.abc import Sequence
-
- from langchain_core.tools import StructuredTool
+ from collections.abc import Callable


  def make_safe_function_name(name: str) -> str:
@@ -19,7 +17,7 @@ def make_safe_function_name(name: str) -> str:


  def create_default_prompt(
-     tools: Sequence[StructuredTool],
+     tools: dict[str, Callable],
      base_prompt: str | None = None,
  ):
      """Create default prompt for the CodeAct agent."""
@@ -45,16 +43,12 @@ If you need to ask for more information or provide the final answer, you can out

  In addition to the Python Standard Library, you can use the following functions:"""

-     for tool in tools:
-         # Use coroutine if it exists, otherwise use func
-         tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
-         # Create a safe function name
-         safe_name = make_safe_function_name(tool.name)
+     for tool_name, tool_callable in tools.items():
          # Determine if it's an async function
          is_async = inspect.iscoroutinefunction(tool_callable)
          # Add appropriate function definition
-         prompt += f'''\n{"async " if is_async else ""}def {safe_name}{str(inspect.signature(tool_callable))}:
-     """{tool.description}"""
+         prompt += f'''\n{"async " if is_async else ""}def {tool_name}{str(inspect.signature(tool_callable))}:
+     """{tool_callable.__doc__}"""
      ...
  '''

@@ -68,6 +62,7 @@ IMPORTANT CODING STRATEGY:
  3. Since many of the provided tools are async, you must use `await` to call them from within `main()`.
  4. Write code up to the point where you make an API call/tool usage with an output.
  5. Print the type/shape and a sample entry of this output, and using that knowledge proceed to write the further code.
+ 6. The maximum number of characters that can be printed is 5000. Remove any unnecessary print statements.

  This means:
  - Write code that makes the API call or tool usage
@@ -78,5 +73,10 @@ This means:

  Reminder: use Python code snippets to call tools

- When you have completely finished the task, present the final result from your script to the user in a clean and readable Markdown format. Do not just summarize what you did; provide the actual output. For example, if you were asked to find unsubscribe links and your script found them, your final response should be a Markdown-formatted list of those links. After you have provided the final output, you MUST end your response with the exact phrase "TASK_COMPLETE"."""
+ When you have completely finished the task, present the final result from your script to the user in a clean and readable Markdown format. Do not just summarize what you did; provide the actual output. For example, if you were asked to find unsubscribe links and your script found them, your final response should be a Markdown-formatted list of those links.
+
+
+ Important:
+ After you have provided the final output, you MUST set `task_complete` to `True` in your response.
+ """
      return prompt
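To make the tool-rendering loop above concrete, here is a small self-contained sketch of what that loop emits for one native async tool. The tool itself is hypothetical; real ones come from `registry.export_tools(..., ToolFormat.NATIVE)`.

```python
import inspect


# Hypothetical native tool; its signature and docstring are what get rendered.
async def list_messages(query: str, max_results: int = 10) -> list[dict]:
    """List Gmail messages matching a query."""
    ...


tools = {"list_messages": list_messages}
prompt = ""
for tool_name, tool_callable in tools.items():
    is_async = inspect.iscoroutinefunction(tool_callable)
    prompt += f'''\n{"async " if is_async else ""}def {tool_name}{str(inspect.signature(tool_callable))}:
    """{tool_callable.__doc__}"""
    ...
'''
print(prompt)
# async def list_messages(query: str, max_results: int = 10) -> list[dict]:
#     """List Gmail messages matching a query."""
#     ...
```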