universal-mcp-agents 0.1.19rc1__py3-none-any.whl → 0.1.20rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-agents has been flagged as potentially problematic.
- universal_mcp/agents/__init__.py +5 -9
- universal_mcp/agents/base.py +4 -1
- universal_mcp/agents/cli.py +0 -3
- universal_mcp/agents/codeact0/__init__.py +2 -3
- universal_mcp/agents/codeact0/__main__.py +2 -2
- universal_mcp/agents/codeact0/agent.py +231 -83
- universal_mcp/agents/codeact0/langgraph_agent.py +1 -1
- universal_mcp/agents/codeact0/prompts.py +38 -5
- universal_mcp/agents/codeact0/sandbox.py +31 -1
- universal_mcp/agents/codeact0/state.py +3 -1
- universal_mcp/agents/codeact0/tools.py +200 -85
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.20rc1.dist-info}/METADATA +1 -1
- universal_mcp_agents-0.1.20rc1.dist-info/RECORD +44 -0
- universal_mcp/agents/codeact/__init__.py +0 -3
- universal_mcp/agents/codeact/__main__.py +0 -33
- universal_mcp/agents/codeact/agent.py +0 -240
- universal_mcp/agents/codeact/models.py +0 -11
- universal_mcp/agents/codeact/prompts.py +0 -82
- universal_mcp/agents/codeact/sandbox.py +0 -85
- universal_mcp/agents/codeact/state.py +0 -11
- universal_mcp/agents/codeact/utils.py +0 -68
- universal_mcp/agents/codeact0/playbook_agent.py +0 -355
- universal_mcp/agents/unified/README.md +0 -45
- universal_mcp/agents/unified/__init__.py +0 -3
- universal_mcp/agents/unified/__main__.py +0 -28
- universal_mcp/agents/unified/agent.py +0 -289
- universal_mcp/agents/unified/langgraph_agent.py +0 -14
- universal_mcp/agents/unified/llm_tool.py +0 -25
- universal_mcp/agents/unified/prompts.py +0 -192
- universal_mcp/agents/unified/sandbox.py +0 -101
- universal_mcp/agents/unified/state.py +0 -42
- universal_mcp/agents/unified/tools.py +0 -188
- universal_mcp/agents/unified/utils.py +0 -388
- universal_mcp_agents-0.1.19rc1.dist-info/RECORD +0 -64
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.20rc1.dist-info}/WHEEL +0 -0
--- universal_mcp/agents/unified/agent.py
+++ /dev/null
@@ -1,289 +0,0 @@
-import json
-import re
-from typing import Literal, cast
-
-from langchain_core.messages import AIMessage, ToolMessage
-from langchain_core.tools import StructuredTool
-from langchain_core.tools import tool as create_tool
-from langgraph.checkpoint.base import BaseCheckpointSaver
-from langgraph.graph import START, StateGraph
-from langgraph.types import Command, RetryPolicy
-from loguru import logger
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolConfig, ToolFormat
-
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
-
-from .llm_tool import smart_print
-from .prompts import (
-    PLAYBOOK_CONFIRMING_PROMPT,
-    PLAYBOOK_GENERATING_PROMPT,
-    PLAYBOOK_PLANNING_PROMPT,
-    create_default_prompt,
-)
-from .sandbox import eval_unsafe
-from .state import CodeActState
-from .tools import create_meta_tools, enter_playbook_mode, get_valid_tools
-from .utils import inject_context, smart_truncate
-
-
-class UnifiedAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        memory: BaseCheckpointSaver | None = None,
-        tools: ToolConfig | None = None,
-        registry: ToolRegistry | None = None,
-        playbook_registry: object | None = None,
-        sandbox_timeout: int = 20,
-        **kwargs,
-    ):
-        super().__init__(
-            name=name,
-            instructions=instructions,
-            model=model,
-            memory=memory,
-            **kwargs,
-        )
-        self.model_instance = load_chat_model(model)
-        self.tools_config = tools or {}
-        self.registry = registry
-        self.playbook_registry = playbook_registry
-        self.sandbox_timeout = sandbox_timeout
-        self.eval_fn = eval_unsafe
-        if self.tools_config and not self.registry:
-            raise ValueError("Registry must be provided with tools")
-
-    async def _build_graph(self):  # noqa: PLR0915
-        meta_tools = create_meta_tools(self.registry)
-        additional_tools = [smart_print, meta_tools["web_search"]]
-        self.additional_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in additional_tools]
-        self.default_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
-
-        async def call_model(state: CodeActState) -> Command[Literal["sandbox", "execute_tools"]]:
-            self.exported_tools = []
-
-            selected_tool_ids = state.get("selected_tool_ids", [])
-            self.exported_tools = await self.registry.export_tools(selected_tool_ids, ToolFormat.LANGCHAIN)
-            all_tools = self.exported_tools + self.additional_tools
-            self.final_instructions, self.tools_context = create_default_prompt(all_tools, self.instructions)
-            messages = [{"role": "user", "content": self.final_instructions}] + state["messages"]
-
-            if state.get("output"):
-                messages.append(
-                    {
-                        "role": "system",
-                        "content": f"The last code execution resulted in this output:\n{state['output']}",
-                    }
-                )
-
-            # Run the model and potentially loop for reflection
-            model_with_tools = self.model_instance.bind_tools(
-                tools=[
-                    enter_playbook_mode,
-                    meta_tools["search_functions"],
-                    meta_tools["load_functions"],
-                ],
-                tool_choice="auto",
-            )
-            response = cast(AIMessage, model_with_tools.invoke(messages))
-            response_text = get_message_text(response)
-            code_match = re.search(r"```python\n(.*?)\n```", response_text, re.DOTALL)
-
-            if code_match:
-                code = code_match.group(1).strip()
-                return Command(goto="sandbox", update={"messages": [response], "code": code, "output": ""})
-            elif response.tool_calls:
-                return Command(goto="execute_tools", update={"messages": [response]})
-            else:
-                return Command(update={"messages": [response]})
-
-        async def execute_tools(state: CodeActState) -> Command[Literal["call_model", "playbook", "sandbox"]]:
-            """Execute tool calls"""
-            last_message = state["messages"][-1]
-            tool_calls = last_message.tool_calls if isinstance(last_message, AIMessage) else []
-
-            tool_messages = []
-            new_tool_ids = []
-            ask_user = False
-            ai_msg = ""
-            tool_result = ""
-
-            for tool_call in tool_calls:
-                try:
-                    if tool_call["name"] == "enter_playbook_mode":
-                        tool_message = ToolMessage(
-                            content=json.dumps("Entered Playbook Mode."),
-                            name=tool_call["name"],
-                            tool_call_id=tool_call["id"],
-                        )
-                        return Command(
-                            goto="playbook",
-                            update={"playbook_mode": "planning", "messages": [tool_message]},  # Entered Playbook mode
-                        )
-                    elif tool_call["name"] == "load_functions":  # Handle load_functions separately
-                        valid_tools, unconnected_links = await get_valid_tools(
-                            tool_ids=tool_call["args"]["tool_ids"], registry=self.registry
-                        )
-                        new_tool_ids.extend(valid_tools)
-                        # Create tool message response
-                        tool_result = f"Successfully loaded {len(valid_tools)} tools: {valid_tools}"
-                        links = "\n".join(unconnected_links)
-                        if links:
-                            ask_user = True
-                            ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {links} "
-                    elif tool_call["name"] == "search_functions":
-                        tool_result = await meta_tools["search_functions"].ainvoke(tool_call["args"])
-                except Exception as e:
-                    tool_result = f"Error during {tool_call}: {e}"
-
-                tool_message = ToolMessage(
-                    content=json.dumps(tool_result),
-                    name=tool_call["name"],
-                    tool_call_id=tool_call["id"],
-                )
-                tool_messages.append(tool_message)
-
-            if ask_user:
-                tool_messages.append(AIMessage(content=ai_msg))
-                return Command(update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})
-
-            return Command(goto="call_model", update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})
-
-        def sandbox(state: CodeActState) -> Command[Literal["call_model"]]:
-            code = state.get("code")
-
-            if not code:
-                logger.error("Sandbox called without code")
-                return Command(
-                    goto="call_model",
-                    update={"output": "Sandbox was called without any code to execute."},
-                )
-
-            previous_add_context = state.get("add_context", {})
-            add_context = inject_context(previous_add_context, self.tools_context)
-            existing_context = state.get("context", {})
-            context = {**existing_context, **add_context}
-            # Execute the script in the sandbox
-
-            output, new_context, new_add_context = self.eval_fn(
-                code, context, previous_add_context, 180
-            )  # default timeout 3 min
-            output = smart_truncate(output)
-
-            return Command(
-                goto="call_model",
-                update={
-                    "output": output,
-                    "code": "",
-                    "context": new_context,
-                    "add_context": new_add_context,
-                },
-            )
-
-        def playbook(state: CodeActState) -> Command[Literal["call_model"]]:
-            playbook_mode = state.get("playbook_mode")
-            if playbook_mode == "planning":
-                planning_instructions = self.instructions + PLAYBOOK_PLANNING_PROMPT
-                messages = [{"role": "system", "content": planning_instructions}] + state["messages"]
-
-                response = self.model_instance.invoke(messages)
-                response = cast(AIMessage, response)
-                response_text = get_message_text(response)
-                # Extract plan from response text between triple backticks
-                plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
-                if plan_match:
-                    plan = plan_match.group(1).strip()
-                else:
-                    plan = response_text.strip()
-                return Command(update={"messages": [response], "playbook_mode": "confirming", "plan": plan})
-
-            elif playbook_mode == "confirming":
-                confirmation_instructions = self.instructions + PLAYBOOK_CONFIRMING_PROMPT
-                messages = [{"role": "system", "content": confirmation_instructions}] + state["messages"]
-                response = self.model_instance.invoke(messages, stream=False)
-                response = get_message_text(response)
-                if "true" in response.lower():
-                    return Command(goto="playbook", update={"playbook_mode": "generating"})
-                else:
-                    return Command(goto="playbook", update={"playbook_mode": "planning"})
-
-            elif playbook_mode == "generating":
-                generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
-                messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
-                response = cast(AIMessage, self.model_instance.invoke(messages))
-                raw_content = get_message_text(response)
-                func_code = raw_content.strip()
-                func_code = func_code.replace("```python", "").replace("```", "")
-                func_code = func_code.strip()
-
-                # Extract function name (handle both regular and async functions)
-                match = re.search(r"^\s*(?:async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", func_code, re.MULTILINE)
-                if match:
-                    function_name = match.group(1)
-                else:
-                    function_name = "generated_playbook"
-
-                # Save or update an Agent using the helper registry
-                saved_note = ""
-                try:
-                    if not self.playbook_registry:
-                        raise ValueError("Playbook registry is not configured")
-
-                    # Build instructions payload embedding the plan and function code
-                    instructions_payload = {
-                        "playbookPlan": state["plan"],
-                        "playbookScript": {
-                            "name": function_name,
-                            "code": func_code,
-                        },
-                    }
-
-                    # Convert tool ids list to dict
-                    tool_dict = convert_tool_ids_to_dict(state["selected_tool_ids"])
-
-                    res = self.playbook_registry.create_agent(
-                        name=function_name,
-                        description=f"Generated playbook: {function_name}",
-                        instructions=instructions_payload,
-                        tools=tool_dict,
-                        visibility="private",
-                    )
-                    saved_note = f"Successfully created your playbook! Check it out here: [View Playbook](https://wingmen.info/agents/{res.id})"
-                except Exception as e:
-                    saved_note = f"Failed to save generated playbook as Agent '{function_name}': {e}"
-
-                # Mock tool call for exit_playbook_mode (for testing/demonstration)
-                mock_exit_tool_call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
-                mock_assistant_message = AIMessage(content=saved_note, tool_calls=[mock_exit_tool_call])
-
-                # Mock tool response for exit_playbook_mode
-                mock_exit_tool_response = ToolMessage(
-                    content=json.dumps(f"Exited Playbook Mode.{saved_note}"),
-                    name="exit_playbook_mode",
-                    tool_call_id="mock_exit_playbook_123",
-                )
-
-                return Command(
-                    update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"}
-                )
-
-        def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
-            """Route to either normal mode or playbook creation"""
-            if state.get("playbook_mode") in ["planning", "confirming", "generating"]:
-                return "playbook"
-
-            return "call_model"
-
-        agent = StateGraph(state_schema=CodeActState)
-        agent.add_node(call_model, retry_policy=RetryPolicy(max_attempts=3, retry_on=filter_retry_on))
-        agent.add_node(sandbox)
-        agent.add_node(playbook)
-        agent.add_node(execute_tools)
-        agent.add_conditional_edges(START, route_entry)
-        # agent.add_edge(START, "call_model")
-        return agent.compile(checkpointer=self.memory)
--- universal_mcp/agents/unified/langgraph_agent.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from universal_mcp.agentr.registry import AgentrRegistry
-
-from universal_mcp.agents.unified import UnifiedAgent
-
-
-async def agent():
-    agent_obj = UnifiedAgent(
-        name="CodeAct Agent",
-        instructions="Be very concise in your answers.",
-        model="anthropic:claude-4-sonnet-20250514",
-        tools=[],
-        registry=AgentrRegistry(),
-    )
-    return await agent_obj._build_graph()
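The removed `langgraph_agent.py` only compiled the unified graph for an external runner. As a purely illustrative driver (not part of either release, assuming AgentR credentials are already configured for `AgentrRegistry`), a compiled LangGraph graph like the one returned by `agent()` would normally be exercised through the standard `ainvoke` API:

```python
import asyncio

from langchain_core.messages import HumanMessage

# Import path as of 0.1.19rc1; this module was removed in 0.1.20rc1.
from universal_mcp.agents.unified.langgraph_agent import agent


async def main():
    graph = await agent()  # compiled StateGraph from the removed factory above
    result = await graph.ainvoke(
        {"messages": [HumanMessage(content="Summarize my unread emails")]},  # example request
        config={"configurable": {"thread_id": "demo-thread"}},  # only meaningful once a checkpointer is supplied
    )
    # The assistant's final reply is the last message in the returned state.
    print(result["messages"][-1].content)


asyncio.run(main())
```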
--- universal_mcp/agents/unified/llm_tool.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from typing import Any
-
-from universal_mcp.agents.codeact0.utils import light_copy
-
-MAX_RETRIES = 3
-
-
-def get_context_str(source: Any | list[Any] | dict[str, Any]) -> str:
-    """Converts context to a string representation."""
-    if not isinstance(source, dict):
-        if isinstance(source, list):
-            source = {f"doc_{i + 1}": str(doc) for i, doc in enumerate(source)}
-        else:
-            source = {"content": str(source)}
-
-    return "\n".join(f"<{k}>\n{str(v)}\n</{k}>" for k, v in source.items())
-
-
-def smart_print(data: Any) -> None:
-    """Prints a dictionary or list of dictionaries with string values truncated to 30 characters.
-
-    Args:
-        data: Either a dictionary with string keys, or a list of such dictionaries
-    """
-    print(light_copy(data))  # noqa: T201
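For context, the removed `llm_tool.py` helpers are small formatting utilities. A hypothetical call (values invented for illustration) shows how `get_context_str` flattens context into tagged blocks and how `smart_print` guards the execution logs:

```python
# Import path as of 0.1.19rc1; this module was removed in 0.1.20rc1.
from universal_mcp.agents.unified.llm_tool import get_context_str, smart_print

# Dict input is rendered key by key; list input becomes doc_1, doc_2, ... entries.
print(get_context_str({"summary": "Q3 revenue grew 12%", "sources": 3}))
# <summary>
# Q3 revenue grew 12%
# </summary>
# <sources>
# 3
# </sources>

# smart_print defers to light_copy, which truncates long string values so that
# large tool outputs do not flood the sandbox logs.
smart_print({"body": "a very long email body ..."})
```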
--- universal_mcp/agents/unified/prompts.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import inspect
-import re
-from collections.abc import Sequence
-
-from langchain_core.tools import StructuredTool
-
-uneditable_prompt = """
-You are **Wingmen**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.
-
-Your job is to answer the user's question or perform the task they ask for.
-- Answer simple questions (which do not require you to write any code or access any external resources) directly. Note that any operation that involves using ONLY print functions should be answered directly.
-- For tasks requiring operations or access to external resources, you should achieve the task by writing Python code snippets inside markdown blocks (e.g., ```python ... ```).
-- You also have access to two tools for finding and loading more python functions- `search_functions` and `load_functions`, which you must use for finding functions for using different external applications. Prefer pre-loaded or functions already available when possible, and prioritize connected applications over unconnected ones. When this is not enough to break a tie between similar applications, ask the user.
-- In writing or natural language processing tasks DO NOT answer directly. Instead write python code using the AI functions provided to you for tasks like summarizing, text generation, classification, data extraction from text or unstructured data, etc. Avoid hardcoded approaches to classification, data extraction.
-- The code you write will be executed in a sandbox environment, and you can use the output of previous executions in your code. variables, functions, imports are retained.
-- Read and understand the output of the previous code snippet and use it to answer the user's request. Note that the code output is NOT visible to the user, so after the task is complete, you have to give the output to the user in a markdown format.
-- If needed, feel free to ask for more information from the user to clarify the task.
-
-GUIDELINES for writing code:
-- Variables defined at the top level of previous code snippets can be referenced in your code.
-- External functions which return a dict or list[dict] are ambiguous. Therefore, you MUST explore the structure of the returned data using `smart_print()` statements before using it, printing keys and values. `smart_print` truncates long strings from data, preventing huge output logs.
-- When an operation involves running a fixed set of steps on a list of items, run one run correctly and then use a for loop to run the steps on each item in the list.
-- In a single code snippet, try to achieve as much as possible.
-- You can only import libraries that come pre-installed with Python. For external functions, use the search and load tools to access them in the code.
-- For displaying final results to the user, you must present your output in markdown format, including image links, so that they are rendered and displayed to the user. The code output is NOT visible to the user.
-- Call all functions using keyword arguments only, never positional arguments.
-- Async Functions (Critical): Use them only as follows-
-  Case 1: Top-level await without asyncio.run()
-  Wrap in async function and call with asyncio.run():
-      async def main():
-          result = await some_async_function()
-          return result
-      asyncio.run(main())
-  Case 2: Using asyncio.run() directly
-  If code already contains asyncio.run(), use as-is — do not wrap again:
-      asyncio.run(some_async_function())
-  Rules:
-  - Never use await outside an async function
-  - Never use await asyncio.run()
-  - Never nest asyncio.run() calls
-"""
-
-
-def make_safe_function_name(name: str) -> str:
-    """Convert a tool name to a valid Python function name."""
-    # Replace non-alphanumeric characters with underscores
-    safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
-    # Ensure the name doesn't start with a digit
-    if safe_name and safe_name[0].isdigit():
-        safe_name = f"tool_{safe_name}"
-    # Handle empty name edge case
-    if not safe_name:
-        safe_name = "unnamed_tool"
-    return safe_name
-
-
-def dedent(text):
-    """Remove any common leading whitespace from every line in `text`.
-
-    This can be used to make triple-quoted strings line up with the left
-    edge of the display, while still presenting them in the source code
-    in indented form.
-
-    Note that tabs and spaces are both treated as whitespace, but they
-    are not equal: the lines "  hello" and "\\thello" are
-    considered to have no common leading whitespace.
-
-    Entirely blank lines are normalized to a newline character.
-    """
-    # Look for the longest leading string of spaces and tabs common to
-    # all lines.
-    margin = None
-    _whitespace_only_re = re.compile("^[ \t]+$", re.MULTILINE)
-    _leading_whitespace_re = re.compile("(^[ \t]*)(?:[^ \t\n])", re.MULTILINE)
-    text = _whitespace_only_re.sub("", text)
-    indents = _leading_whitespace_re.findall(text)
-    for indent in indents:
-        if margin is None:
-            margin = indent
-
-        # Current line more deeply indented than previous winner:
-        # no change (previous winner is still on top).
-        elif indent.startswith(margin):
-            pass
-
-        # Current line consistent with and no deeper than previous winner:
-        # it's the new winner.
-        elif margin.startswith(indent):
-            margin = indent
-
-        # Find the largest common whitespace between current line and previous
-        # winner.
-        else:
-            for i, (x, y) in enumerate(zip(margin, indent)):
-                if x != y:
-                    margin = margin[:i]
-                    break
-
-    # sanity check (testing/debugging only)
-    if 0 and margin:
-        for line in text.split("\n"):
-            assert not line or line.startswith(margin), f"line = {line!r}, margin = {margin!r}"
-
-    if margin:
-        text = re.sub(r"(?m)^" + margin, "", text)
-    return text
-
-
-def indent(text, prefix, predicate=None):
-    """Adds 'prefix' to the beginning of selected lines in 'text'.
-
-    If 'predicate' is provided, 'prefix' will only be added to the lines
-    where 'predicate(line)' is True. If 'predicate' is not provided,
-    it will default to adding 'prefix' to all non-empty lines that do not
-    consist solely of whitespace characters.
-    """
-    if predicate is None:
-        # str.splitlines(True) doesn't produce empty string.
-        # ''.splitlines(True) => []
-        # 'foo\n'.splitlines(True) => ['foo\n']
-        # So we can use just `not s.isspace()` here.
-        def predicate(s):
-            return not s.isspace()
-
-    prefixed_lines = []
-    for line in text.splitlines(True):
-        if predicate(line):
-            prefixed_lines.append(prefix)
-        prefixed_lines.append(line)
-
-    return "".join(prefixed_lines)
-
-
-def create_default_prompt(
-    tools: Sequence[StructuredTool],
-    base_prompt: str | None = None,
-):
-    system_prompt = uneditable_prompt.strip() + (
-        "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n"
-    )
-    tools_context = {}
-
-    for tool in tools:
-        if hasattr(tool, "func") and tool.func is not None:
-            tool_callable = tool.func
-            is_async = False
-        elif hasattr(tool, "coroutine") and tool.coroutine is not None:
-            tool_callable = tool.coroutine
-            is_async = True
-        system_prompt += f'''{"async " if is_async else ""}def {tool.name} {str(inspect.signature(tool_callable))}:
-    """{tool.description}"""
-    ...
-'''
-        safe_name = make_safe_function_name(tool.name)
-        tools_context[safe_name] = tool_callable
-
-    if base_prompt and base_prompt.strip():
-        system_prompt += f"Your goal is to perform the following task:\n\n{base_prompt}"
-
-    return system_prompt, tools_context
-
-
-PLAYBOOK_PLANNING_PROMPT = """Now, you are tasked with creating a reusable playbook from the user's previous workflow.
-
-TASK: Analyze the conversation history and code execution to create a step-by-step plan for a reusable function. Do not include the searching and loading of tools. Assume that the tools have already been loaded.
-
-Your plan should:
-1. Identify the key steps in the workflow
-2. Mark user-specific variables that should become the main playbook function parameters using `variable_name` syntax. Intermediate variables should not be highlighted using ``
-3. Keep the logic generic and reusable
-4. Be clear and concise
-
-Example:
-```
-1. Connect to database using `db_connection_string`
-2. Query user data for `user_id`
-3. Process results and calculate `metric_name`
-4. Send notification to `email_address`
-```
-
-Now create a plan based on the conversation history. Enclose it between ``` and ```. Ask the user if the plan is okay."""
-
-
-PLAYBOOK_CONFIRMING_PROMPT = """Now, you are tasked with confirming the playbook plan. Return True if the user is happy with the plan, False otherwise. Do not say anything else in your response. The user response will be the last message in the chain.
-"""
-
-PLAYBOOK_GENERATING_PROMPT = """Now, you are tasked with generating the playbook function. Return the function in Python code.
-Do not include any other text in your response.
-The function should be a single, complete piece of code that can be executed independently, based on previously executed code snippets that executed correctly.
-The parameters of the function should be the same as the final confirmed playbook plan.
-Do not include anything other than python code in your response
-"""
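To make the removed `create_default_prompt` concrete: each LangChain tool is rendered into the system prompt as a Python stub, and its underlying callable is collected into `tools_context` for the sandbox. A hypothetical example (the `send_email` tool is invented for illustration):

```python
from langchain_core.tools import tool

# Import path as of 0.1.19rc1; this module was removed in 0.1.20rc1.
from universal_mcp.agents.unified.prompts import create_default_prompt


@tool
def send_email(to: str, subject: str, body: str) -> str:
    """Send an email via the connected mail provider."""
    return "sent"


system_prompt, tools_context = create_default_prompt([send_email], "Handle my inbox.")

# The prompt now ends with a stub like:
#
# def send_email (to: str, subject: str, body: str) -> str:
#     """Send an email via the connected mail provider."""
#     ...
#
# and tools_context maps the sanitized name back to the real callable:
assert tools_context["send_email"] is send_email.func
```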
--- universal_mcp/agents/unified/sandbox.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import contextlib
-import inspect
-import io
-import queue
-import re
-import socket
-import threading
-import types
-from typing import Any
-
-from langchain_core.tools import tool
-
-from universal_mcp.agents.codeact0.utils import derive_context
-
-
-def eval_unsafe(
-    code: str, _locals: dict[str, Any], add_context: dict[str, Any], timeout: int = 180
-) -> tuple[str, dict[str, Any], dict[str, Any]]:
-    """
-    Execute code safely with a timeout.
-    - Returns (output_str, filtered_locals_dict, new_add_context)
-    - Errors or timeout are returned as output_str.
-    - Previous variables in _locals persist across calls.
-    """
-
-    EXCLUDE_TYPES = (
-        types.ModuleType,
-        type(re.match("", "")),
-        type(threading.Lock()),
-        type(threading.RLock()),
-        threading.Event,
-        threading.Condition,
-        threading.Semaphore,
-        queue.Queue,
-        socket.socket,
-        io.IOBase,
-    )
-
-    result_container = {"output": "<no output>"}
-
-    def target():
-        try:
-            with contextlib.redirect_stdout(io.StringIO()) as f:
-                exec(code, _locals, _locals)
-            result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
-        except Exception as e:
-            result_container["output"] = "Error during execution: " + str(e)
-
-    thread = threading.Thread(target=target)
-    thread.start()
-    thread.join(timeout)
-
-    if thread.is_alive():
-        result_container["output"] = f"Code timeout: code execution exceeded {timeout} seconds."
-
-    # Filter locals for picklable/storable variables
-    all_vars = {}
-    for key, value in _locals.items():
-        if key == "__builtins__":
-            continue
-        if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
-            continue
-        if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
-            continue
-        if isinstance(value, EXCLUDE_TYPES):
-            continue
-        if not callable(value) or not hasattr(value, "__name__"):
-            all_vars[key] = value
-
-    # Safely derive context
-    try:
-        new_add_context = derive_context(code, add_context)
-    except Exception:
-        new_add_context = add_context
-
-    return result_container["output"], all_vars, new_add_context
-
-
-@tool(parse_docstring=True)
-def execute_ipython_cell(snippet: str) -> str:
-    """
-    Executes Python code in an IPython notebook cell:
-    * The output generated by the notebook cell is returned by this tool
-    * State is persistent across executions and discussions with the user
-    * The input code may reference variables created in previous executions
-
-    Args:
-        snippet: The Python code to execute.
-
-    Returns:
-        String containing the execution output or error message.
-
-    Raises:
-        ValueError: If snippet is empty.
-    """
-    # Validate required parameters
-    if not snippet or not snippet.strip():
-        raise ValueError("Parameter 'snippet' is required and cannot be empty or whitespace")
-
-    # Your actual execution logic would go here
-    return f"Successfully executed {len(snippet)} characters of Python code"
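As a quick illustration of the removed sandbox contract (a hypothetical snippet, not package code): `eval_unsafe` runs code against a shared locals dict and returns a filtered copy of it, so top-level variables survive from one call to the next while modules, coroutines, and other non-storable objects are dropped:

```python
# Import path as of 0.1.19rc1; this module was removed in 0.1.20rc1.
from universal_mcp.agents.unified.sandbox import eval_unsafe

context: dict = {}
add_context: dict = {}

out, context, add_context = eval_unsafe("x = 21\nprint(x)", context, add_context, timeout=10)
print(out)  # "21"

out, context, add_context = eval_unsafe("print(x * 2)", context, add_context, timeout=10)
print(out)  # "42"; `x` persisted through the filtered locals returned above
```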
--- universal_mcp/agents/unified/state.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from typing import Annotated, Any
-
-from langgraph.prebuilt.chat_agent_executor import AgentState
-
-
-def _enqueue(left: list, right: list) -> list:
-    """Treat left as a FIFO queue, append new items from right (preserve order),
-    keep items unique, and cap total size to 20 (drop oldest items)."""
-    max_size = 30
-    preferred_size = 20
-    if len(right) > preferred_size:
-        preferred_size = min(max_size, len(right))
-    queue = list(left or [])
-
-    for item in right[:preferred_size] or []:
-        if item in queue:
-            queue.remove(item)
-        queue.append(item)
-
-    if len(queue) > preferred_size:
-        queue = queue[-preferred_size:]
-
-    return queue
-
-
-class CodeActState(AgentState):
-    """State for CodeAct agent."""
-
-    context: dict[str, Any]
-    """Dictionary containing the execution context with available tools and variables."""
-    add_context: dict[str, Any]
-    """Dictionary containing the additional context (functions, classes, imports) to be added to the execution context."""
-    playbook_mode: str | None
-    """State for the playbook agent."""
-    selected_tool_ids: Annotated[list[str], _enqueue]
-    """Queue for tools exported from registry"""
-    plan: str | None
-    """Plan for the playbook agent."""
-    code: str | None
-    """ Code for the sandbox to run"""
-    output: str | None
-    """ Output from the last code """
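The `_enqueue` reducer above keeps `selected_tool_ids` as a bounded, de-duplicated FIFO; note that despite the docstring's "cap to 20", a single large update can raise the effective cap to 30. A small illustration with made-up tool ids:

```python
# Import path as of 0.1.19rc1; this module was removed in 0.1.20rc1.
from universal_mcp.agents.unified.state import _enqueue

current = ["google_mail__send_email", "slack__post_message"]
update = ["slack__post_message", "notion__create_page"]

print(_enqueue(current, update))
# ['google_mail__send_email', 'slack__post_message', 'notion__create_page']
# 'slack__post_message' is re-appended rather than duplicated; the queue is then
# trimmed to the newest 20 ids (more, up to 30, when a single update exceeds 20).
```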