universal-mcp-agents 0.1.23__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agents/__init__.py +11 -2
- universal_mcp/agents/base.py +3 -6
- universal_mcp/agents/codeact0/agent.py +14 -17
- universal_mcp/agents/codeact0/prompts.py +9 -3
- universal_mcp/agents/codeact0/sandbox.py +2 -2
- universal_mcp/agents/codeact0/tools.py +2 -2
- universal_mcp/agents/codeact0/utils.py +48 -0
- universal_mcp/agents/codeact00/__init__.py +3 -0
- universal_mcp/agents/codeact00/__main__.py +26 -0
- universal_mcp/agents/codeact00/agent.py +578 -0
- universal_mcp/agents/codeact00/config.py +77 -0
- universal_mcp/agents/codeact00/langgraph_agent.py +14 -0
- universal_mcp/agents/codeact00/llm_tool.py +25 -0
- universal_mcp/agents/codeact00/prompts.py +364 -0
- universal_mcp/agents/codeact00/sandbox.py +135 -0
- universal_mcp/agents/codeact00/state.py +66 -0
- universal_mcp/agents/codeact00/tools.py +525 -0
- universal_mcp/agents/codeact00/utils.py +678 -0
- universal_mcp/agents/codeact01/__init__.py +3 -0
- universal_mcp/agents/codeact01/__main__.py +26 -0
- universal_mcp/agents/codeact01/agent.py +413 -0
- universal_mcp/agents/codeact01/config.py +77 -0
- universal_mcp/agents/codeact01/langgraph_agent.py +14 -0
- universal_mcp/agents/codeact01/llm_tool.py +25 -0
- universal_mcp/agents/codeact01/prompts.py +246 -0
- universal_mcp/agents/codeact01/sandbox.py +162 -0
- universal_mcp/agents/codeact01/state.py +58 -0
- universal_mcp/agents/codeact01/tools.py +648 -0
- universal_mcp/agents/codeact01/utils.py +552 -0
- universal_mcp/agents/llm.py +7 -3
- universal_mcp/applications/llm/app.py +66 -15
- {universal_mcp_agents-0.1.23.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/METADATA +1 -1
- universal_mcp_agents-0.1.24rc3.dist-info/RECORD +66 -0
- universal_mcp_agents-0.1.23.dist-info/RECORD +0 -44
- {universal_mcp_agents-0.1.23.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/WHEEL +0 -0
universal_mcp/agents/codeact00/llm_tool.py (new file)
@@ -0,0 +1,25 @@
+from typing import Any
+
+from universal_mcp.agents.codeact00.utils import light_copy
+
+MAX_RETRIES = 3
+
+
+def get_context_str(source: Any | list[Any] | dict[str, Any]) -> str:
+    """Converts context to a string representation."""
+    if not isinstance(source, dict):
+        if isinstance(source, list):
+            source = {f"doc_{i + 1}": str(doc) for i, doc in enumerate(source)}
+        else:
+            source = {"content": str(source)}
+
+    return "\n".join(f"<{k}>\n{str(v)}\n</{k}>" for k, v in source.items())
+
+
+def smart_print(data: Any) -> None:
+    """Prints a dictionary or list of dictionaries with string values truncated to 30 characters.
+
+    Args:
+        data: Either a dictionary with string keys, or a list of such dictionaries
+    """
+    print(light_copy(data)) # noqa: T201
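To make the two helpers above concrete, here is a minimal usage sketch (not part of the diff; it assumes the 0.1.24rc3 wheel and its dependencies are installed so the module is importable, and the sample inputs are made up):

```python
# Illustrative sketch only -- the sample data below is invented.
from universal_mcp.agents.codeact00.llm_tool import get_context_str, smart_print

# A bare value is wrapped in a single <content> tag.
print(get_context_str("hello"))
# <content>
# hello
# </content>

# A list is numbered into <doc_1>, <doc_2>, ... tags.
print(get_context_str(["first source", "second source"]))

# smart_print routes through light_copy(), which truncates long string values
# before printing so execution logs stay compact.
smart_print({"summary": "x" * 500})
```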
universal_mcp/agents/codeact00/prompts.py (new file)
@@ -0,0 +1,364 @@
+import inspect
+import re
+from collections.abc import Callable
+
+uneditable_prompt = """
+You are **Ruzo**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.
+
+Your job is to answer the user's question or perform the task they ask for.
+- Answer simple questions (which do not require you to write any code or access any external resources) directly. Note that any operation that involves using ONLY print functions should be answered directly in the chat. NEVER write a string or sequences of strings yourself and print it.
+- For task requiring operations or access to external resources, you should achieve the task by executing Python code snippets.
+- You have access to `execute_ipython_cell` tool that allows you to execute Python code in an IPython notebook cell.
+- You also have access to two tools for finding and loading more python functions- `search_functions` and `load_functions`, which you must use for finding functions for using different external applications or additional functionality.
+- Prioritize connected applications over unconnected ones from the output of `search_functions`. However, if the user specifically asks for an application, you MUST use that irrespective of connection status.
+- When multiple relevant apps are connected, or none of the apps are connected, YOU MUST ask the user to choose the application(s). The search results may also inform you when such a case occurs, and you must stop and ask the user if multiple apps are relevant.
+- If needed, feel free to ask for more information from the user (without using the `execute_ipython_cell` tool) to clarify the task.
+
+**Code Design**:
+- Structure your code into multiple small, well-defined functions within a single execution snippet. This ensures modularity and makes it easier to debug or update specific logic without rewriting or re-executing large portions of code. You can only rewrite the function/portion that you need to edit since the others are retained in context.
+- Every snippet you execute using `execute_ipython_cell` MUST follow this structure:
+    - Break down logic into 3-5 small helper functions (30 lines each max)
+    - For definining large constants (multiline strings, dictionaries, etc) do it either at the global level or in a separate helper function responsible only for defining the object. Ensures that you do not have to rewrite large parts of the code multiple times during debugging/modifying.
+    - Each helper function should do ONE thing
+    - Example:
+    def _helper_function_1(...):
+        ...
+
+    def _helper_function_2(...):
+        ...
+    result1 = _helper_function_1(...)
+    smart_print(result1[:1]) #As an example, to check if it has been processed correctly
+    result2 = _helper_function_2(...)
+    smart_print(result2[:1])
+    final_result = ...
+    smart_print(final_result)
+    - Thus, while debugging, if you face an error in result2, you do not need to rewrite _helper_function_1.
+
+
+**Code Writing Rules:**
+- The code you write will be executed in a sandbox environment, and you can use the output of previous executions in your code. Variables, defined functions, imports, loaded functions are retained.
+- DO NOT use the code execution to communicate/show anything to the user. The user is not able to see the output of the code cells, it is only for your processing and actions. Similarly, you should only use print/smart_print for your own analysis, the user does not get the output.
+- Whenever you need to generate a large body of text, such as a document, an HTML file, or a report, use llm functions (see critical section below on LLM functions) and save the output file using save/upload functions. Do not generate text yourself and do not print the entire text in order to save your memory.
+- External functions which return a dict or list[dict] are ambiguous. Therefore, you MUST explore the structure of the returned data using `smart_print()` statements before using it, printing keys and values. `smart_print` truncates long strings from data, preventing huge output logs.
+- When an operation involves running a fixed set of steps on a list of items, run one run correctly and then use a for loop to run the steps on each item in the list.
+- You can only import libraries that come pre-installed with Python. However, do consider searching for external functions first, using the search and load tools to access them in the code.
+- For displaying final results to the user, you must present your output in markdown format, including image links, so that they are rendered and displayed to the user. The code output is NOT visible to the user.
+- Call all functions using keyword arguments only, never positional arguments.
+- NEVER use execute_ipython_cell for:
+    - Static analysis or commentary
+    - Text that could be written as markdown
+    - Final output summarization after analysis
+    - Anything that's just formatted print statements
+
+**Critical:LLM Function Usage Rules:**
+- For text creation or document generation (e.g., HTML, Markdown, textual content for document/email), do not respond directly; you must use execute_ipython_cell with llm__generate_text.
+- For any data extraction, text analysis, or classification task, always use the LLM functions (llm__extract_data, llm__classify_data, or llm__call_llm)-never rely on regex, manual parsing, or heuristic methods.
+- Use llm__call_llm only as a fallback when the other functions don't match the task exactly.
+
+**Final Output Requirements:**
+- Once you have all the information about the task, return the text directly to user in markdown format. Do NOT call `execute_ipython_cell` or any LLM tools again just for summarization. Do NOT use llm__generate_text for this purpose.
+- Always respond in github flavoured markdown format.
+- For charts and diagrams, use mermaid chart in markdown directly.
+- Your final response should contain the complete answer to the user's request in a clear, well-formatted manner that directly addresses what they asked for.
+- For file types like images, audio, documents, etc., you must use the `upload_file` tool to upload the file to the server and render the link/path in the markdown response.
+"""
+
+AGENT_BUILDER_PLANNING_PROMPT = """TASK: Analyze the conversation history and code execution to create a step-by-step non-technical plan for a reusable function.
+Rules:
+- Do NOT include the searching and loading of functions. Assume that the functions have already been loaded.
+- The plan is a sequence of steps corresponding to the key logical steps taken to achieve the user's task in the conversation history, without focusing on technical specifics.
+- You must output a JSON object with a single key "steps", which is a list of strings. Each string is a step in the agent.
+- Identify user-provided information as variables that should become the main agent input parameters using `variable_name` syntax, enclosed by backticks `...`. Intermediate variables should be highlighted using italics, i.e. *...*, NEVER `...`
+- Keep the logic generic and reusable. Avoid hardcoding any names/constants. Instead, keep them as variables with defaults. They should be represented as `variable_name(default = default_value)`.
+- Have a human-friendly plan and inputs format. That is, it must not use internal IDs or keys used by APIs as either inputs or outputs to the overall plan; using them internally is okay.
+- Be as concise as possible, especially for internal processing steps.
+- For steps where the assistant's intelligence was used outside of the code to infer/decide/analyse something, replace it with the use of *llm__* functions in the plan if required.
+
+Example Conversation History:
+User Message: "Create an image using Gemini for Marvel Cinematic Universe in comic style"
+Code snippet: image_result = await google_gemini__generate_image(prompt=prompt)
+Assistant Message: "The image has been successfully generated [image_result]."
+User Message: "Save the image in my OneDrive"
+Code snippet: image_data = base64.b64decode(image_result['data'])
+temp_file_path = tempfile.mktemp(suffix='.png')
+with open(temp_file_path, 'wb') as f:
+    f.write(image_data)
+# Upload the image to OneDrive with a descriptive filename
+onedrive_filename = "Marvel_Cinematic_Universe_Comic_Style.png"
+
+print(f"Uploading to OneDrive as: {onedrive_filename}")
+
+# Upload to OneDrive root folder
+upload_result = onedrive__upload_file(
+    file_path=temp_file_path,
+    parent_id='root',
+    file_name=onedrive_filename
+)
+
+Generated Steps:
+"steps": [
+  "Generate an image using Gemini model with `image_prompt` and `style(default = 'comic')`",
+  "Upload the obtained image to OneDrive using `onedrive_filename(default = 'generated_image.png')` and `onedrive_parent_folder(default = 'root')`",
+  "Return confirmation of upload including file name and destination path, and link to the upload"
+]
+Note that internal variables like upload_result, image_result are not highlighted in the plan, and intermediate processing details are skipped.
+Now create a plan based on the conversation history. Do not include any other text or explanation in your response. Just the JSON object.
+Note that the following tools are pre-loaded for the agent's use, and can be inluded in your plan if needed as internal variables (especially the llm tools)-\n
+"""
+
+
+AGENT_BUILDER_PLANNER_PROMPT = """
+You are the Agent Builder Planner. Decide whether planning can be finalized using the available context.
+
+Operating rules:
+- Do NOT output the final structured plan yourself.
+- If the prior context already contains enough information to produce the plan, IMMEDIATELY call `finalize_planning` with no questions.
+- For update requests: if it is unclear WHAT to change, ask a single, highly targeted question; otherwise infer reasonable updates from context and call `finalize_planning`.
+- Only ask when truly necessary (true ambiguity/perplexity). If on the fence, prefer calling `finalize_planning`.
+- Avoid summaries or small talk; either ask one concise question or call a tool.
+- If the user decides not to proceed, call `cancel_planning`.
+
+Role and handoff:
+- You are the conversational front-end of the Agent Builder. Your job is to determine readiness.
+- When ready, call `finalize_planning` to hand off to a downstream builder that will produce the structured plan and, later, the code from that plan.
+- Never generate plan steps or code yourself.
+"""
+
+
+AGENT_BUILDER_GENERATING_PROMPT = """
+You are tasked with generating granular, reusable Python code for an agent based on the final confirmed plan and the conversation history (user messages, assistant messages, and code executions).
+
+Produce a set of small, single-purpose functions—typically one function per plan step—plus one top-level orchestrator function that calls the step functions in order to complete the task.
+
+Rules-
+- Do NOT include the searching and loading of functions. Assume required functions have already been loaded. Include imports you need.
+- Your response must be **ONLY Python code**. No markdown or explanations.
+- Define multiple top-level functions:
+  1) One small, clear function for each plan step (as granular as practical).
+  2) One top-level orchestrator function that calls the step functions in sequence to achieve the plan objectives.
+- The orchestrator function's parameters **must exactly match the external variables** in the agent plan (the ones marked with backticks `` `variable_name` ``). Provide defaults exactly as specified in the plan when present. Variables in italics (i.e. enclosed in *...*) are internal and must not be orchestrator parameters.
+- The orchestrator function MUST be declared with `def` or `async def` and be directly runnable with a single Python command (e.g., `image_generator(...)`). If it is async, assume the caller will `await` it.
+- NEVER use asyncio or asyncio.run(). The code is executed in a ipython environment, so using await is enough.
+- Step functions should accept only the inputs they need, return explicit outputs, and pass intermediate results forward via return values—not globals.
+- Name functions in snake_case derived from their purpose/step. Use keyword arguments in calls; avoid positional-only calls.
+- Keep the code self-contained and executable. Put imports at the top of the code. Do not nest functions unless strictly necessary.
+- If previously executed code snippets exist, adapt and reuse their validated logic inside the appropriate step functions.
+- Do not print the final output; return it from the orchestrator.
+
+Example:
+
+If the plan has:
+
+"steps": [
+  "Receive creative description as image_prompt",
+  "Generate image using Gemini with style(default = 'comic')",
+  "Save temporary image internally as *temp_file_path*",
+  "Upload *temp_file_path* to OneDrive folder onedrive_parent_folder(default = 'root')"
+]
+
+Then the functions should look like:
+
+```python
+from typing import Dict
+
+def generate_image(image_prompt: str, style: str = "comic") -> Dict:
+    # previously validated code to call Gemini
+    ...
+
+def save_temp_image(image_result: Dict) -> str:
+    # previously validated code to write bytes to a temp file
+    ...
+
+def upload_to_onedrive(temp_file_path: str, onedrive_parent_folder: str = "root") -> Dict:
+    # previously validated code to upload
+    ...
+
+def image_generator(image_prompt: str, style: str = "comic", onedrive_parent_folder: str = "root") -> Dict:
+    image_result = generate_image(image_prompt=image_prompt, style=style)
+    temp_file_path = save_temp_image(image_result=image_result)
+    upload_result = upload_to_onedrive(temp_file_path=temp_file_path, onedrive_parent_folder=onedrive_parent_folder)
+    return upload_result
+```
+
+Use this convention consistently to generate the final code.
+Note that the following tools are pre-loaded for the agent's use, and can be included in your code-\n
+"""
+
+
+# Patch-based update prompts (minimal, Codex-style patch output)
+
+AGENT_BUILDER_PLAN_PATCH_PROMPT = """
+You are updating an existing agent plan represented as plain text (one step per line).
+
+Output Requirements:
+- ALWAYS output ONLY an OpenAI-style patch between the exact fences:
+  *** Begin Patch\n ... \n*** End Patch
+- Use one or more @@ hunks with context lines (' '), deletions ('-'), and additions ('+').
+- Make minimal edits; preserve unrelated lines and preserve step order unless a reordering is explicitly required.
+- Do NOT include any prose, markdown, or code fences other than the patch fences.
+
+Plan content constraints (apply while patching; do not rewrite the whole plan):
+- Keep steps non-technical and human-friendly, describing goals/actions rather than implementation details.
+- External inputs must be denoted as `variable_name`; include defaults as `variable_name(default = value)` when appropriate.
+- Intermediate/internal variables must be italicized like *temp_file_path* (never in backticks).
+- Avoid using internal IDs/keys as plan inputs. Keep inputs human-facing.
+- Be concise; avoid unnecessary sub-steps. Prefer a small number of clear steps.
+- Preserve existing variable names and defaults unless the context clearly requires a change.
+- If removing or reordering steps, ensure downstream references remain coherent (do not reference a removed step).
+- Preserve existing bullet/line formatting; one step per line.
+- Idempotence: make the smallest delta that satisfies the requested update.
+- For steps where the assistant's intelligence was used outside of the code to infer/decide/analyse something, replace it with the use of *llm__* functions in the plan if required.
+
+Context will include the current plan and conversation history.
+"""
+
+
+AGENT_BUILDER_CODE_PATCH_PROMPT = """
+You are updating existing Python code for an agent.
+
+Output Requirements:
+- ALWAYS output ONLY an OpenAI-style patch between the exact fences:
+  *** Begin Patch\n ... \n*** End Patch
+- Use one or more @@ hunks with context (' '), deletions ('-'), additions ('+').
+- Make minimal edits; preserve unrelated code and keep function/public API signatures stable unless the plan demands changes.
+- Do NOT include any prose or markdown outside the patch.
+- Do NOT wrap the patch in triple backticks; only use the patch fences shown above.
+
+Context will include the current code and the confirmed plan.
+
+Structural constraints (apply while patching; do not rewrite whole file):
+- Maintain small, single-purpose functions (typically one per plan step) plus ONE top-level orchestrator that invokes them in order.
+- The orchestrator parameters must exactly match the external variables in the plan, including defaults.
+- Preserve function names and public signatures unless the plan explicitly requires a change; if a signature changes, update orchestrator and all call sites consistently in the same patch.
+- Keep imports at the top; pass data via return values (no new globals); avoid nested functions unless necessary.
+- Do not print final results; ensure the orchestrator returns the final value.
+- Prefer adapting and reusing previously validated logic inside affected functions; do not rewrite unrelated functions.
+
+Additional rules to ensure reliable, minimal patches:
+- Environment: Code runs in IPython; if a function is async, callers will `await` it. Do NOT use `asyncio.run()` or create event loops.
+- Calling style: Use keyword arguments (no positional-only calls) when you modify call sites.
+- Imports: Add only necessary imports; deduplicate and keep existing import order/formatting when possible.
+- Formatting: Preserve existing formatting, indentation, comments, and docstrings for unchanged code. Do NOT reformat the file.
+- Exceptions/IO: Preserve existing error handling semantics; do not introduce interactive input or random printing.
+- Idempotence: Make the smallest change set that satisfies the plan; avoid broad refactors.
+"""
+
+
+AGENT_BUILDER_META_PROMPT = """
+You are preparing metadata for a reusable agent based on the confirmed step-by-step plan.
+
+TASK: Create a concise, human-friendly name and a short description for the agent.
+
+INPUTS:
+- Conversation context and plan steps will be provided in prior messages
+
+REQUIREMENTS:
+1. Name: 3-6 words, Title Case, no punctuation except hyphens if needed
+2. Description: Single sentence, <= 140 characters, clearly states what the agent does
+
+OUTPUT: Return ONLY a JSON object with exactly these keys:
+{
+  "name": "...",
+  "description": "..."
+}
+"""
+
+
+def make_safe_function_name(name: str) -> str:
+    """Convert a tool name to a valid Python function name."""
+    # Replace non-alphanumeric characters with underscores
+    safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
+    # Ensure the name doesn't start with a digit
+    if safe_name and safe_name[0].isdigit():
+        safe_name = f"tool_{safe_name}"
+    # Handle empty name edge case
+    if not safe_name:
+        safe_name = "unnamed_tool"
+    return safe_name
+
+
+# Compile regex once for better performance
+_RAISES_PATTERN = re.compile(r"\n\s*[Rr]aises\s*:.*$", re.DOTALL)
+
+
+def _clean_docstring(docstring: str | None) -> str:
+    """Remove the 'Raises:' section and everything after it from a docstring."""
+    if not docstring:
+        return ""
+
+    # Use pre-compiled regex for better performance
+    cleaned = _RAISES_PATTERN.sub("", docstring)
+    return cleaned.strip()
+
+
+def build_tool_definitions(tools: list[Callable]) -> tuple[list[str], dict[str, Callable]]:
+    tool_definitions = []
+    context = {}
+
+    # Pre-allocate lists for better performance
+    tool_definitions = [None] * len(tools)
+
+    for i, tool in enumerate(tools):
+        tool_name = tool.__name__
+        cleaned_docstring = _clean_docstring(tool.__doc__)
+
+        # Pre-compute string parts to avoid repeated string operations
+        async_prefix = "async " if inspect.iscoroutinefunction(tool) else ""
+        signature = str(inspect.signature(tool))
+
+        tool_definitions[i] = f'''{async_prefix}def {tool_name} {signature}:
+    """{cleaned_docstring}"""
+    ...'''
+        context[tool_name] = tool
+
+    return tool_definitions, context
+
+
+def create_default_prompt(
+    tools: list[Callable],
+    additional_tools: list[Callable],
+    base_prompt: str | None = None,
+    apps_string: str | None = None,
+    agent: object | None = None,
+    is_initial_prompt: bool = False,
+):
+    if is_initial_prompt:
+        system_prompt = uneditable_prompt.strip()
+        if apps_string:
+            system_prompt += f"\n\n**Connected external applications (These apps have been logged into by the user):**\n{apps_string}\n\n Use `search_functions` to search for functions you can perform using the above. You can also discover more applications using the `search_functions` tool to find additional tools and integrations, if required. However, you MUST not assume the application when multiple apps are connected for a particular usecase.\n"
+        system_prompt += "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n Carefully note which functions are normal and which functions are async. CRITICAL: Use `await` with async functions and async functions ONLY."
+    else:
+        system_prompt = ""
+
+    tool_definitions, tools_context = build_tool_definitions(tools + additional_tools)
+    system_prompt += "\n".join(tool_definitions)
+
+    if is_initial_prompt:
+        if base_prompt and base_prompt.strip():
+            system_prompt += (
+                f"\n\nUse the following information/instructions while completing your tasks:\n\n{base_prompt}"
+            )
+
+        # Append existing agent (plan + code) if provided
+        try:
+            if agent and hasattr(agent, "instructions"):
+                pb = agent.instructions or {}
+                plan = pb.get("plan")
+                code = pb.get("script")
+                if plan or code:
+                    system_prompt += (
+                        "\n\nYou have been provided an existing agent plan and code for performing a task.:\n"
+                    )
+                if plan:
+                    if isinstance(plan, list):
+                        plan_block = "\n".join(f"- {str(s)}" for s in plan)
+                    else:
+                        plan_block = str(plan)
+                    system_prompt += f"Plan Steps:\n{plan_block}\n"
+                if code:
+                    system_prompt += f"\nScript:\n```python\n{str(code)}\n```\nThis function can be called by you using `execute_ipython_code`. Do NOT redefine the function, unless it has to be modified. For modifying it, you must enter agent_builder mode first so that it is modified in the database and not just the chat locally."
+        except Exception:
+            # Silently ignore formatting issues
+            pass
+
+    return system_prompt, tools_context
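As a quick orientation to the helpers at the end of this file, the sketch below runs a hypothetical tool callable through `build_tool_definitions` and cleans a name with `make_safe_function_name` (illustrative only; `my_search_tool` and the sample name are not part of the package, and the import assumes the 0.1.24rc3 wheel is installed):

```python
# Illustrative sketch only.
from universal_mcp.agents.codeact00.prompts import (
    build_tool_definitions,
    make_safe_function_name,
)


async def my_search_tool(query: str, limit: int = 5) -> list[str]:
    """Search for items matching a query.

    Raises:
        ValueError: If query is empty.
    """
    return [query] * limit


# Each callable becomes a stub definition string (async prefix, signature,
# docstring with the "Raises:" section stripped) plus a name -> callable map.
definitions, context = build_tool_definitions([my_search_tool])
print(definitions[0])
print(context["my_search_tool"] is my_search_tool)  # True

# Non-identifier characters are replaced with underscores.
print(make_safe_function_name("google-mail.send message"))  # google_mail_send_message
```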
universal_mcp/agents/codeact00/sandbox.py (new file)
@@ -0,0 +1,135 @@
+import ast
+import contextlib
+import inspect
+import io
+import pickle
+import queue
+import re
+import socket
+import threading
+import types
+from typing import Any
+
+from langchain_core.tools import tool
+
+from universal_mcp.agents.codeact00.utils import derive_context, inject_context, smart_truncate
+
+
+async def eval_unsafe(
+    code: str, _locals: dict[str, Any], add_context: dict[str, Any], timeout: int = 180
+) -> tuple[str, dict[str, Any], dict[str, Any]]:
+    """
+    Execute code safely with a timeout.
+    - Returns (output_str, filtered_locals_dict, new_add_context)
+    - Errors or timeout are returned as output_str.
+    - Previous variables in _locals persist across calls.
+    """
+
+    EXCLUDE_TYPES = (
+        types.ModuleType,
+        type(re.match("", "")),
+        type(re.compile("")),
+        type(threading.Lock()),
+        type(threading.RLock()),
+        threading.Event,
+        threading.Condition,
+        threading.Semaphore,
+        queue.Queue,
+        socket.socket,
+        io.IOBase,
+    )
+
+    result_container = {"output": "<no output>"}
+
+    try:
+        compiled_code = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
+        with contextlib.redirect_stdout(io.StringIO()) as f:
+            coroutine = eval(compiled_code, _locals, _locals)
+            # Await the coroutine to run the code if it's async
+            if coroutine:
+                await coroutine
+        result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
+    except Exception as e:
+        result_container["output"] = f"Error during execution: {type(e).__name__}: {e}"
+
+    # If NameError for provider__tool occurred, append guidance (no retry)
+    try:
+        m = re.search(r"NameError:\s*name\s*'([^']+)'\s*is\s*not\s*defined", result_container["output"])
+        if m and "__" in m.group(1):
+            result_container["output"] += "\nHint: If it is a valid tool, load it before running this snippet."
+    except Exception:
+        pass
+
+    # Filter locals for picklable/storable variables
+    all_vars = {}
+    for key, value in _locals.items():
+        if key.startswith("__"):
+            continue
+        if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
+            continue
+        if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
+            continue
+        if isinstance(value, EXCLUDE_TYPES):
+            continue
+        if not callable(value) or not hasattr(value, "__name__"):
+            # Only keep if it can be pickled (serialized) successfully
+            try:
+                pickle.dumps(value)
+                all_vars[key] = value
+            except Exception:
+                pass
+
+    # Safely derive context
+    try:
+        new_add_context = derive_context(code, add_context)
+    except Exception:
+        new_add_context = add_context
+
+    return result_container["output"], all_vars, new_add_context
+
+
+@tool(parse_docstring=True)
+def execute_ipython_cell(snippet: str) -> str:
+    """
+    Executes Python code in an IPython notebook cell:
+    * The output generated by the notebook cell is returned by this tool
+    * State is persistent across executions and discussions with the user
+    * The input code may reference variables created in previous executions
+
+    Args:
+        snippet: The Python code to execute.
+
+    Returns:
+        String containing the execution output or error message.
+
+    Raises:
+        ValueError: If snippet is empty.
+    """
+    # Validate required parameters
+    if not snippet or not snippet.strip():
+        raise ValueError("Parameter 'snippet' is required and cannot be empty or whitespace")
+
+    # Your actual execution logic would go here
+    return f"Successfully executed {len(snippet)} characters of Python code"
+
+
+async def handle_execute_ipython_cell(
+    code: str,
+    tools_context: dict[str, Any],
+    eval_fn,
+    effective_previous_add_context: dict[str, Any],
+    effective_existing_context: dict[str, Any],
+) -> tuple[str, dict[str, Any], dict[str, Any]]:
+    """
+    Execute a code cell with shared state, supporting both sync and async eval functions.
+
+    Returns (output, new_context, new_add_context).
+    """
+    context = {**tools_context, **effective_existing_context}
+    context = inject_context(effective_previous_add_context, context)
+    if inspect.iscoroutinefunction(eval_fn):
+        output, new_context, new_add_context = await eval_fn(code, context, effective_previous_add_context, 180)
+    else:
+        output, new_context, new_add_context = eval_fn(code, context, effective_previous_add_context, 180)
+    output = smart_truncate(output)
+    return output, new_context, new_add_context
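For reference, a minimal sketch of driving `eval_unsafe` directly (illustrative only; the snippet and the empty namespaces are made up, and the import assumes the wheel plus its langchain dependencies are installed):

```python
# Illustrative sketch only.
import asyncio

from universal_mcp.agents.codeact00.sandbox import eval_unsafe

SNIPPET = """
total = sum(range(10))
print(f"total = {total}")
"""


async def main() -> None:
    # Start with an empty variable namespace and no additional context.
    output, saved_vars, add_context = await eval_unsafe(SNIPPET, {}, {})
    print(output)      # captured stdout: "total = 45"
    print(saved_vars)  # picklable variables kept for the next cell, e.g. {"total": 45}


asyncio.run(main())
```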
universal_mcp/agents/codeact00/state.py (new file)
@@ -0,0 +1,66 @@
+from typing import Annotated, Any
+
+from langchain.agents import AgentState
+from pydantic import BaseModel, Field
+
+
+class AgentBuilderPlan(BaseModel):
+    steps: list[str] = Field(description="The steps of the agent.")
+
+
+class AgentBuilderCode(BaseModel):
+    code: str = Field(description="The Python code for the agent.")
+
+
+class AgentBuilderMeta(BaseModel):
+    name: str = Field(description="Concise, title-cased agent name (3-6 words).")
+    description: str = Field(description="Short, one-sentence description (<= 140 chars).")
+
+
+class AgentBuilderPatch(BaseModel):
+    patch: str = Field(
+        description=(
+            "OpenAI-style patch text wrapped between '*** Begin Patch' and '*** End Patch' fences."
+        )
+    )
+
+
+def _enqueue(left: list, right: list) -> list:
+    """Treat left as a FIFO queue, append new items from right (preserve order),
+    keep items unique, and cap total size to 20 (drop oldest items)."""
+
+    # Tool ifd are unique
+    max_size = 30
+    preferred_size = 20
+    if len(right) > preferred_size:
+        preferred_size = min(max_size, len(right))
+    queue = list(left or [])
+
+    for item in right[:preferred_size] or []:
+        if item in queue:
+            queue.remove(item)
+        queue.append(item)
+
+    if len(queue) > preferred_size:
+        queue = queue[-preferred_size:]
+
+    return list(set(queue))
+
+
+class CodeActState(AgentState):
+    """State for CodeAct agent."""
+
+    context: dict[str, Any]
+    """Dictionary containing the execution context with available tools and variables."""
+    add_context: dict[str, Any]
+    """Dictionary containing the additional context (functions, classes, imports) to be added to the execution context."""
+    agent_builder_mode: str | None
+    """State for the agent builder agent."""
+    selected_tool_ids: Annotated[list[str], _enqueue]
+    """Queue for tools exported from registry"""
+    plan: list[str] | None
+    """Plan for the agent builder agent."""
+    agent_name: str | None
+    """Generated agent name after confirmation."""
+    agent_description: str | None
+    """Generated short description after confirmation."""
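To show how the `_enqueue` reducer attached to `CodeActState.selected_tool_ids` merges updates, a small illustrative check (the tool ids are made-up examples; assumes the 0.1.24rc3 wheel is installed):

```python
# Illustrative sketch only.
from universal_mcp.agents.codeact00.state import _enqueue

existing = ["slack__send_message", "gmail__list_messages"]
incoming = ["gmail__list_messages", "onedrive__upload_file"]

merged = _enqueue(existing, incoming)
# Duplicates collapse to a single entry and the queue is capped (20 by default,
# growing toward 30 when many new ids arrive). The final list(set(...)) call
# means the returned order is not guaranteed.
print(sorted(merged))
# ['gmail__list_messages', 'onedrive__upload_file', 'slack__send_message']
```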