universal-mcp-agents 0.1.19rc1__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agents/__init__.py +15 -16
- universal_mcp/agents/base.py +46 -35
- universal_mcp/agents/bigtool/state.py +1 -1
- universal_mcp/agents/cli.py +2 -5
- universal_mcp/agents/codeact0/__init__.py +2 -3
- universal_mcp/agents/codeact0/__main__.py +4 -7
- universal_mcp/agents/codeact0/agent.py +444 -96
- universal_mcp/agents/codeact0/langgraph_agent.py +1 -1
- universal_mcp/agents/codeact0/llm_tool.py +2 -254
- universal_mcp/agents/codeact0/prompts.py +247 -137
- universal_mcp/agents/codeact0/sandbox.py +52 -18
- universal_mcp/agents/codeact0/state.py +26 -6
- universal_mcp/agents/codeact0/tools.py +400 -74
- universal_mcp/agents/codeact0/utils.py +175 -11
- universal_mcp/agents/codeact00/__init__.py +3 -0
- universal_mcp/agents/{unified → codeact00}/__main__.py +4 -6
- universal_mcp/agents/codeact00/agent.py +578 -0
- universal_mcp/agents/codeact00/config.py +77 -0
- universal_mcp/agents/{unified → codeact00}/langgraph_agent.py +2 -2
- universal_mcp/agents/{unified → codeact00}/llm_tool.py +1 -1
- universal_mcp/agents/codeact00/prompts.py +364 -0
- universal_mcp/agents/{unified → codeact00}/sandbox.py +52 -18
- universal_mcp/agents/codeact00/state.py +66 -0
- universal_mcp/agents/codeact00/tools.py +525 -0
- universal_mcp/agents/codeact00/utils.py +678 -0
- universal_mcp/agents/codeact01/__init__.py +3 -0
- universal_mcp/agents/{codeact → codeact01}/__main__.py +4 -11
- universal_mcp/agents/codeact01/agent.py +413 -0
- universal_mcp/agents/codeact01/config.py +77 -0
- universal_mcp/agents/codeact01/langgraph_agent.py +14 -0
- universal_mcp/agents/codeact01/llm_tool.py +25 -0
- universal_mcp/agents/codeact01/prompts.py +246 -0
- universal_mcp/agents/codeact01/sandbox.py +162 -0
- universal_mcp/agents/{unified → codeact01}/state.py +26 -10
- universal_mcp/agents/codeact01/tools.py +648 -0
- universal_mcp/agents/{unified → codeact01}/utils.py +175 -11
- universal_mcp/agents/llm.py +14 -4
- universal_mcp/agents/react.py +3 -3
- universal_mcp/agents/sandbox.py +124 -69
- universal_mcp/applications/llm/app.py +76 -24
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/METADATA +6 -5
- universal_mcp_agents-0.1.24rc3.dist-info/RECORD +66 -0
- universal_mcp/agents/codeact/__init__.py +0 -3
- universal_mcp/agents/codeact/agent.py +0 -240
- universal_mcp/agents/codeact/models.py +0 -11
- universal_mcp/agents/codeact/prompts.py +0 -82
- universal_mcp/agents/codeact/sandbox.py +0 -85
- universal_mcp/agents/codeact/state.py +0 -11
- universal_mcp/agents/codeact/utils.py +0 -68
- universal_mcp/agents/codeact0/playbook_agent.py +0 -355
- universal_mcp/agents/unified/README.md +0 -45
- universal_mcp/agents/unified/__init__.py +0 -3
- universal_mcp/agents/unified/agent.py +0 -289
- universal_mcp/agents/unified/prompts.py +0 -192
- universal_mcp/agents/unified/tools.py +0 -188
- universal_mcp_agents-0.1.19rc1.dist-info/RECORD +0 -64
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/WHEEL +0 -0
universal_mcp/agents/codeact01/prompts.py
@@ -0,0 +1,246 @@
+import inspect
+import re
+from collections.abc import Callable
+
+uneditable_prompt = """
+You are **Ruzo**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.
+
+Your job is to answer the user's question or perform the task they ask for.
+- Answer simple questions (which do not require you to write any code or access any external resources) directly. Note that any operation that involves using ONLY print functions should be answered directly in the chat. NEVER write a string or sequences of strings yourself and print it.
+- For task requiring operations or access to external resources, you should achieve the task by executing Python code snippets.
+- You have access to `execute_ipython_cell` tool that allows you to execute Python code in an IPython notebook cell.
+- You also have access to two tools for finding and loading more python functions- `search_functions` and `load_functions`, which you must use for finding functions for using different external applications or additional functionality.
+- Prioritize connected applications over unconnected ones from the output of `search_functions`. However, if the user specifically asks for an application, you MUST use that irrespective of connection status.
+- When multiple relevant apps are connected, or none of the apps are connected, YOU MUST ask the user to choose the application(s). The search results may also inform you when such a case occurs, and you must stop and ask the user if multiple apps are relevant.
+- If needed, feel free to ask for more information from the user (without using the `execute_ipython_cell` tool) to clarify the task.
+
+**Final Output Requirements:**
+- Once you have all the information about the task, return the text directly to user in markdown format. Do NOT call `execute_ipython_cell` or any LLM tools again just for summarization. Do NOT use llm__generate_text for this purpose.
+- Always respond in github flavoured markdown format.
+- For charts and diagrams, use mermaid chart in markdown directly.
+- Your final response should contain the complete answer to the user's request in a clear, well-formatted manner that directly addresses what they asked for.
+- For file types like images, audio, documents, etc., you must use the `upload_file`/`save_file` function to upload the file to the server and render the link/path in the markdown response.
+"""
+
+AGENT_BUILDER_PLANNING_PROMPT = """TASK: Analyze the conversation history and code execution to create a step-by-step non-technical plan for a reusable function.
+Rules:
+- Do NOT include the searching and loading of functions. Assume that the functions have already been loaded.
+- The plan is a sequence of steps corresponding to the key logical steps taken to achieve the user's task in the conversation history, without focusing on technical specifics.
+- You must output a JSON object with a single key "steps", which is a list of strings. Each string is a step in the agent.
+- Identify user-provided information as variables that should become the main agent input parameters using `variable_name` syntax, enclosed by backticks `...`. Intermediate variables should be highlighted using italics, i.e. *...*, NEVER `...`
+- Keep the logic generic and reusable. Avoid hardcoding any names/constants. Instead, keep them as variables with defaults. They should be represented as `variable_name(default = default_value)`.
+- Have a human-friendly plan and inputs format. That is, it must not use internal IDs or keys used by APIs as either inputs or outputs to the overall plan; using them internally is okay.
+- Be as concise as possible, especially for internal processing steps.
+- For steps where the assistant's intelligence was used outside of the code to infer/decide/analyse something, replace it with the use of *llm__* functions in the plan if required.
+
+Example Conversation History:
+User Message: "Create an image using Gemini for Marvel Cinematic Universe in comic style"
+Code snippet: image_result = await google_gemini__generate_image(prompt=prompt)
+Assistant Message: "The image has been successfully generated [image_result]."
+User Message: "Save the image in my OneDrive"
+Code snippet: image_data = base64.b64decode(image_result['data'])
+temp_file_path = tempfile.mktemp(suffix='.png')
+with open(temp_file_path, 'wb') as f:
+    f.write(image_data)
+# Upload the image to OneDrive with a descriptive filename
+onedrive_filename = "Marvel_Cinematic_Universe_Comic_Style.png"
+
+print(f"Uploading to OneDrive as: {onedrive_filename}")
+
+# Upload to OneDrive root folder
+upload_result = onedrive__upload_file(
+    file_path=temp_file_path,
+    parent_id='root',
+    file_name=onedrive_filename
+)
+
+Generated Steps:
+"steps": [
+"Generate an image using Gemini model with `image_prompt` and `style(default = 'comic')`",
+"Upload the obtained image to OneDrive using `onedrive_filename(default = 'generated_image.png')` and `onedrive_parent_folder(default = 'root')`",
+"Return confirmation of upload including file name and destination path, and link to the upload"
+]
+Note that internal variables like upload_result, image_result are not highlighted in the plan, and intermediate processing details are skipped.
+Now create a plan based on the conversation history. Do not include any other text or explanation in your response. Just the JSON object.
+Note that the following tools are pre-loaded for the agent's use, and can be inluded in your plan if needed as internal variables (especially the llm tools)-\n
+"""
+
+
+AGENT_BUILDER_GENERATING_PROMPT = """
+You are tasked with generating granular, reusable Python code for an agent based on the final confirmed plan and the conversation history (user messages, assistant messages, and code executions).
+
+Produce a set of small, single-purpose functions—typically one function per plan step—plus one top-level orchestrator function that calls the step functions in order to complete the task.
+
+Rules-
+- Do NOT include the searching and loading of functions. Assume required functions have already been loaded. Include imports you need.
+- Your response must be **ONLY Python code**. No markdown or explanations.
+- Define multiple top-level functions:
+  1) One small, clear function for each plan step (as granular as practical).
+  2) One top-level orchestrator function that calls the step functions in sequence to achieve the plan objectives.
+- The orchestrator function's parameters **must exactly match the external variables** in the agent plan (the ones marked with backticks `` `variable_name` ``). Provide defaults exactly as specified in the plan when present. Variables in italics (i.e. enclosed in *...*) are internal and must not be orchestrator parameters.
+- The orchestrator function MUST be declared with `def` or `async def` and be directly runnable with a single Python command (e.g., `image_generator(...)`). If it is async, assume the caller will `await` it.
+- NEVER use asyncio or asyncio.run(). The code is executed in a ipython environment, so using await is enough.
+- Step functions should accept only the inputs they need, return explicit outputs, and pass intermediate results forward via return values—not globals.
+- Name functions in snake_case derived from their purpose/step. Use keyword arguments in calls; avoid positional-only calls.
+- Keep the code self-contained and executable. Put imports at the top of the code. Do not nest functions unless strictly necessary.
+- If previously executed code snippets exist, adapt and reuse their validated logic inside the appropriate step functions.
+- Do not print the final output; return it from the orchestrator.
+
+Example:
+
+If the plan has:
+
+"steps": [
+"Receive creative description as image_prompt",
+"Generate image using Gemini with style(default = 'comic')",
+"Save temporary image internally as *temp_file_path*",
+"Upload *temp_file_path* to OneDrive folder onedrive_parent_folder(default = 'root')"
+]
+
+Then the functions should look like:
+
+```python
+from typing import Dict
+
+def generate_image(image_prompt: str, style: str = "comic") -> Dict:
+    # previously validated code to call Gemini
+    ...
+
+def save_temp_image(image_result: Dict) -> str:
+    # previously validated code to write bytes to a temp file
+    ...
+
+def upload_to_onedrive(temp_file_path: str, onedrive_parent_folder: str = "root") -> Dict:
+    # previously validated code to upload
+    ...
+
+def image_generator(image_prompt: str, style: str = "comic", onedrive_parent_folder: str = "root") -> Dict:
+    image_result = generate_image(image_prompt=image_prompt, style=style)
+    temp_file_path = save_temp_image(image_result=image_result)
+    upload_result = upload_to_onedrive(temp_file_path=temp_file_path, onedrive_parent_folder=onedrive_parent_folder)
+    return upload_result
+```
+
+Use this convention consistently to generate the final code.
+Note that the following tools are pre-loaded for the agent's use, and can be included in your code-\n
+"""
+
+
+AGENT_BUILDER_META_PROMPT = """
+You are preparing metadata for a reusable agent based on the confirmed step-by-step plan.
+
+TASK: Create a concise, human-friendly name and a short description for the agent.
+
+INPUTS:
+- Conversation context and plan steps will be provided in prior messages
+
+REQUIREMENTS:
+1. Name: 3-6 words, Title Case, no punctuation except hyphens if needed
+2. Description: Single sentence, <= 140 characters, clearly states what the agent does
+
+OUTPUT: Return ONLY a JSON object with exactly these keys:
+{
+  "name": "...",
+  "description": "..."
+}
+"""
+
+
+def make_safe_function_name(name: str) -> str:
+    """Convert a tool name to a valid Python function name."""
+    # Replace non-alphanumeric characters with underscores
+    safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
+    # Ensure the name doesn't start with a digit
+    if safe_name and safe_name[0].isdigit():
+        safe_name = f"tool_{safe_name}"
+    # Handle empty name edge case
+    if not safe_name:
+        safe_name = "unnamed_tool"
+    return safe_name
+
+
+# Compile regex once for better performance
+_RAISES_PATTERN = re.compile(r"\n\s*[Rr]aises\s*:.*$", re.DOTALL)
+
+
+def _clean_docstring(docstring: str | None) -> str:
+    """Remove the 'Raises:' section and everything after it from a docstring."""
+    if not docstring:
+        return ""
+
+    # Use pre-compiled regex for better performance
+    cleaned = _RAISES_PATTERN.sub("", docstring)
+    return cleaned.strip()
+
+
+def build_tool_definitions(tools: list[Callable]) -> tuple[list[str], dict[str, Callable]]:
+    tool_definitions = []
+    context = {}
+
+    # Pre-allocate lists for better performance
+    tool_definitions = [None] * len(tools)
+
+    for i, tool in enumerate(tools):
+        tool_name = tool.__name__
+        cleaned_docstring = _clean_docstring(tool.__doc__)
+
+        # Pre-compute string parts to avoid repeated string operations
+        async_prefix = "async " if inspect.iscoroutinefunction(tool) else ""
+        signature = str(inspect.signature(tool))
+
+        tool_definitions[i] = f'''{async_prefix}def {tool_name} {signature}:
+    """{cleaned_docstring}"""
+    ...'''
+        context[tool_name] = tool
+
+    return tool_definitions, context
+
+
+def create_default_prompt(
+    tools: list[Callable],
+    additional_tools: list[Callable],
+    base_prompt: str | None = None,
+    apps_string: str | None = None,
+    agent: object | None = None,
+    is_initial_prompt: bool = False,
+):
+    if is_initial_prompt:
+        system_prompt = uneditable_prompt.strip()
+        if apps_string:
+            system_prompt += f"\n\n**Connected external applications (These apps have been logged into by the user):**\n{apps_string}\n\n Use `search_functions` to search for functions you can perform using the above. You can also discover more applications using the `search_functions` tool to find additional tools and integrations, if required. However, you MUST not assume the application when multiple apps are connected for a particular usecase.\n"
+        system_prompt += "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n Carefully note which functions are normal and which functions are async. CRITICAL: Use `await` with async functions and async functions ONLY."
+    else:
+        system_prompt = ""
+
+    tool_definitions, tools_context = build_tool_definitions(tools + additional_tools)
+    system_prompt += "\n".join(tool_definitions)
+
+    if is_initial_prompt:
+        if base_prompt and base_prompt.strip():
+            system_prompt += (
+                f"\n\nUse the following information/instructions while completing your tasks:\n\n{base_prompt}"
+            )
+
+        # Append existing agent (plan + code) if provided
+        try:
+            if agent and hasattr(agent, "instructions"):
+                pb = agent.instructions or {}
+                plan = pb.get("plan")
+                code = pb.get("script")
+                if plan or code:
+                    system_prompt += (
+                        "\n\nYou have been provided an existing agent plan and code for performing a task.:\n"
+                    )
+                    if plan:
+                        if isinstance(plan, list):
+                            plan_block = "\n".join(f"- {str(s)}" for s in plan)
+                        else:
+                            plan_block = str(plan)
+                        system_prompt += f"Plan Steps:\n{plan_block}\n"
+                    if code:
+                        system_prompt += f"\nScript:\n```python\n{str(code)}\n```\nThis function can be called by you using `execute_ipython_code`. Do NOT redefine the function, unless it has to be modified. For modifying it, you must enter agent_builder mode first so that it is modified in the database and not just the chat locally."
+        except Exception:
+            # Silently ignore formatting issues
+            pass

    return system_prompt, tools_context
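
For orientation on the new `codeact01/prompts.py` module above: `build_tool_definitions` renders each loaded tool as a stub signature plus its docstring (with any `Raises:` section stripped), and `create_default_prompt` appends those stubs to the system prompt. The sketch below shows how the two compose; the `fetch_weather` tool is a hypothetical placeholder, not something shipped in this package.

```python
from universal_mcp.agents.codeact01.prompts import build_tool_definitions, create_default_prompt


def fetch_weather(city: str, units: str = "metric") -> dict:
    """Fetch the current weather for a city.

    Raises:
        ValueError: If the city is unknown.
    """
    ...


# Render the tool as a stub definition; _clean_docstring drops the "Raises:" section.
definitions, context = build_tool_definitions([fetch_weather])
print(definitions[0])
# def fetch_weather (city: str, units: str = 'metric') -> dict:
#     """Fetch the current weather for a city."""
#     ...

# Build the full system prompt: the uneditable preamble plus the rendered stubs.
system_prompt, tools_context = create_default_prompt(
    tools=[fetch_weather],
    additional_tools=[],
    is_initial_prompt=True,
)
assert "def fetch_weather" in system_prompt
assert tools_context["fetch_weather"] is fetch_weather
```
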
universal_mcp/agents/codeact01/sandbox.py
@@ -0,0 +1,162 @@
+import ast
+import contextlib
+import inspect
+import io
+import pickle
+import queue
+import re
+import socket
+import threading
+import types
+from typing import Any
+
+from langchain_core.tools import tool
+
+from universal_mcp.agents.codeact01.utils import derive_context, inject_context, smart_truncate
+
+
+async def eval_unsafe(
+    code: str, _locals: dict[str, Any], add_context: dict[str, Any], timeout: int = 180
+) -> tuple[str, dict[str, Any], dict[str, Any]]:
+    """
+    Execute code safely with a timeout.
+    - Returns (output_str, filtered_locals_dict, new_add_context)
+    - Errors or timeout are returned as output_str.
+    - Previous variables in _locals persist across calls.
+    """
+
+    EXCLUDE_TYPES = (
+        types.ModuleType,
+        type(re.match("", "")),
+        type(re.compile("")),
+        type(threading.Lock()),
+        type(threading.RLock()),
+        threading.Event,
+        threading.Condition,
+        threading.Semaphore,
+        queue.Queue,
+        socket.socket,
+        io.IOBase,
+    )
+
+    result_container = {"output": "<no output>"}
+
+    try:
+        compiled_code = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
+        with contextlib.redirect_stdout(io.StringIO()) as f:
+            coroutine = eval(compiled_code, _locals, _locals)
+            # Await the coroutine to run the code if it's async
+            if coroutine:
+                await coroutine
+        result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
+    except Exception as e:
+        result_container["output"] = f"Error during execution: {type(e).__name__}: {e}"
+
+    # If NameError for provider__tool occurred, append guidance (no retry)
+    try:
+        m = re.search(r"NameError:\s*name\s*'([^']+)'\s*is\s*not\s*defined", result_container["output"])
+        if m and "__" in m.group(1):
+            result_container["output"] += "\nHint: If it is a valid tool, load it before running this snippet."
+    except Exception:
+        pass
+
+    # Filter locals for picklable/storable variables
+    all_vars = {}
+    for key, value in _locals.items():
+        if key.startswith("__"):
+            continue
+        if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
+            continue
+        if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
+            continue
+        if isinstance(value, EXCLUDE_TYPES):
+            continue
+        if not callable(value) or not hasattr(value, "__name__"):
+            # Only keep if it can be pickled (serialized) successfully
+            try:
+                pickle.dumps(value)
+                all_vars[key] = value
+            except Exception:
+                pass
+
+    # Safely derive context
+    try:
+        new_add_context = derive_context(code, add_context)
+    except Exception:
+        new_add_context = add_context
+
+    return result_container["output"], all_vars, new_add_context
+
+
+@tool(parse_docstring=True)
+def execute_ipython_cell(snippet: str) -> str:
+    """
+    Executes a Python code snippet in a sandbox with retained context (top level defined functions, variables, imports, loaded functions using `load_functions` are retained)
+
+    **Design Principles**:
+    - Break logic into 3-5 small helper functions (max 30 lines each).
+    - Keep large constants (e.g., multiline strings, dicts) global or in a dedicated helper function.
+    - Modify only the relevant helper during debugging—context persists across executions.
+    - Each helper function should do ONE thing
+    - Example:
+      def _get_json_schema():
+          return {"key1":"many details"...}
+      def _helper_function_1(...):
+          ...
+
+      def _helper_function_2(...):
+          ...
+      result1 = _helper_function_1(...)
+      smart_print(result1[:1]) #As an example, to check if it has been processed correctly
+      result2 = _helper_function_2(...)
+      smart_print(result2[:1])
+      final_result = ...
+      smart_print(final_result)
+    - Thus, while debugging, if you face an error in result2, you do not need to rewrite _helper_function_1() or _get_json_schema().
+    - External functions which return a dict or list[dict] are ambiguous. Therefore, you MUST explore the structure of the returned data using `smart_print()` statements before using it, printing keys and values. `smart_print` truncates long strings from data, preventing huge output logs.
+    - **Critical:LLM Function Usage Rules:**
+    - For text creation or document generation (e.g., HTML, Markdown, textual content for document/email), do not respond directly; you must use execute_ipython_cell with llm__generate_text.
+    - For any data extraction, text analysis, or classification task, always use the LLM functions inside python code (llm__extract_data, llm__classify_data, or llm__call_llm)-never rely on regex, manual parsing, or heuristic methods.
+    - Use llm__call_llm only as a fallback when the other functions don't match the task exactly.
+    - Regex or string manipulation can only be used when the text is fully structured.
+    - You can only import libraries that come pre-installed with Python. However, do consider using preloaded functions or searching for external functions first, using the search and load tools to access them in the code.
+    - Use loops to process multiple items—test once before scaling.
+    - Do not use this tool just to print or expose code execution output to the user. Use markdown without a tool call for final results.
+
+    Args:
+        snippet: Python code to execute.
+
+    Returns:
+        Execution result or error as a string.
+
+    Raises:
+        ValueError: If snippet is empty.
+    """
+    # Validate required parameters
+    if not snippet or not snippet.strip():
+        raise ValueError("Parameter 'snippet' is required and cannot be empty or whitespace")
+
+    # Your actual execution logic would go here
+    return f"Successfully executed {len(snippet)} characters of Python code"
+
+
+async def handle_execute_ipython_cell(
+    code: str,
+    tools_context: dict[str, Any],
+    eval_fn,
+    effective_previous_add_context: dict[str, Any],
+    effective_existing_context: dict[str, Any],
+) -> tuple[str, dict[str, Any], dict[str, Any]]:
+    """
+    Execute a code cell with shared state, supporting both sync and async eval functions.
+
+    Returns (output, new_context, new_add_context).
+    """
+    context = {**tools_context, **effective_existing_context}
+    context = inject_context(effective_previous_add_context, context)
+    if inspect.iscoroutinefunction(eval_fn):
+        output, new_context, new_add_context = await eval_fn(code, context, effective_previous_add_context, 180)
+    else:
+        output, new_context, new_add_context = eval_fn(code, context, effective_previous_add_context, 180)
+    output = smart_truncate(output)
+    return output, new_context, new_add_context
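
The core of the new sandbox above is `eval_unsafe`: it compiles the snippet with top-level `await` allowed, executes it against a shared locals dict while capturing stdout, and returns the captured output together with the picklable variables so state carries over to the next cell. A rough driver sketch under those assumptions (the snippets and variable names are illustrative, not from the package):

```python
import asyncio

from universal_mcp.agents.codeact01.sandbox import eval_unsafe


async def main() -> None:
    shared_locals: dict = {}  # acts like the persistent IPython namespace
    add_context: dict = {}    # functions/classes/imports derived from earlier snippets

    # First "cell": define a variable and print something.
    out, shared_locals, add_context = await eval_unsafe(
        "x = 21\nprint('x is', x)", shared_locals, add_context
    )
    print(out)  # -> x is 21

    # Second "cell": x survived because it is picklable and was kept in the filtered locals.
    out, shared_locals, add_context = await eval_unsafe(
        "print(x * 2)", shared_locals, add_context
    )
    print(out)  # -> 42


asyncio.run(main())
```
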
universal_mcp/agents/{unified → codeact01}/state.py
@@ -1,11 +1,27 @@
 from typing import Annotated, Any
 
-from
+from langchain.agents import AgentState
+from pydantic import BaseModel, Field
+
+
+class AgentBuilderPlan(BaseModel):
+    steps: list[str] = Field(description="The steps of the agent.")
+
+
+class AgentBuilderCode(BaseModel):
+    code: str = Field(description="The Python code for the agent.")
+
+
+class AgentBuilderMeta(BaseModel):
+    name: str = Field(description="Concise, title-cased agent name (3-6 words).")
+    description: str = Field(description="Short, one-sentence description (<= 140 chars).")
 
 
 def _enqueue(left: list, right: list) -> list:
     """Treat left as a FIFO queue, append new items from right (preserve order),
     keep items unique, and cap total size to 20 (drop oldest items)."""
+
+    # Tool ifd are unique
     max_size = 30
     preferred_size = 20
     if len(right) > preferred_size:
@@ -20,7 +36,7 @@ def _enqueue(left: list, right: list) -> list:
     if len(queue) > preferred_size:
         queue = queue[-preferred_size:]
 
-    return queue
+    return list(set(queue))
 
 
 class CodeActState(AgentState):
@@ -30,13 +46,13 @@ class CodeActState(AgentState):
     """Dictionary containing the execution context with available tools and variables."""
     add_context: dict[str, Any]
     """Dictionary containing the additional context (functions, classes, imports) to be added to the execution context."""
-
-    """State for the
+    agent_builder_mode: str | None
+    """State for the agent builder agent."""
     selected_tool_ids: Annotated[list[str], _enqueue]
     """Queue for tools exported from registry"""
-    plan: str | None
-    """Plan for the
-
-    """
-
-    """
+    plan: list[str] | None
+    """Plan for the agent builder agent."""
+    agent_name: str | None
+    """Generated agent name after confirmation."""
+    agent_description: str | None
+    """Generated short description after confirmation."""
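
The reshaped state module above adds Pydantic schemas for the agent-builder flow (`AgentBuilderPlan`, `AgentBuilderCode`, `AgentBuilderMeta`) alongside the new `agent_builder_mode`, `agent_name`, and `agent_description` fields on `CodeActState`. A small sketch of how planner and metadata outputs might be validated against these schemas; the step text and names are made-up examples, not values produced by the package:

```python
from universal_mcp.agents.codeact01.state import AgentBuilderMeta, AgentBuilderPlan

# The planning prompt asks for a JSON object with a "steps" list; parsing it through
# the schema keeps the downstream state (CodeActState.plan) typed as list[str].
plan = AgentBuilderPlan(
    steps=[
        "Generate an image using Gemini model with `image_prompt` and `style(default = 'comic')`",
        "Upload the obtained image to OneDrive using `onedrive_parent_folder(default = 'root')`",
    ]
)
print(plan.steps)

# AGENT_BUILDER_META_PROMPT produces the name/description pair; after confirmation these
# would presumably land in the new agent_name / agent_description fields on CodeActState.
meta = AgentBuilderMeta(
    name="Gemini Image Uploader",
    description="Generates an image with Gemini and uploads it to OneDrive.",
)
print(meta.name, "-", meta.description)
```
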