universal-mcp-agents 0.1.19__py3-none-any.whl → 0.1.20rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +2 -5
- universal_mcp/agents/base.py +2 -2
- universal_mcp/agents/codeact0/__init__.py +2 -3
- universal_mcp/agents/codeact0/__main__.py +2 -2
- universal_mcp/agents/codeact0/agent.py +231 -83
- universal_mcp/agents/codeact0/langgraph_agent.py +1 -1
- universal_mcp/agents/codeact0/prompts.py +37 -7
- universal_mcp/agents/codeact0/sandbox.py +31 -1
- universal_mcp/agents/codeact0/state.py +3 -1
- universal_mcp/agents/codeact0/tools.py +200 -95
- {universal_mcp_agents-0.1.19.dist-info → universal_mcp_agents-0.1.20rc1.dist-info}/METADATA +1 -1
- {universal_mcp_agents-0.1.19.dist-info → universal_mcp_agents-0.1.20rc1.dist-info}/RECORD +13 -22
- universal_mcp/agents/codeact/__init__.py +0 -3
- universal_mcp/agents/codeact/__main__.py +0 -33
- universal_mcp/agents/codeact/agent.py +0 -240
- universal_mcp/agents/codeact/models.py +0 -11
- universal_mcp/agents/codeact/prompts.py +0 -82
- universal_mcp/agents/codeact/sandbox.py +0 -85
- universal_mcp/agents/codeact/state.py +0 -11
- universal_mcp/agents/codeact/utils.py +0 -68
- universal_mcp/agents/codeact0/playbook_agent.py +0 -360
- {universal_mcp_agents-0.1.19.dist-info → universal_mcp_agents-0.1.20rc1.dist-info}/WHEEL +0 -0
--- a/universal_mcp/agents/codeact0/playbook_agent.py
+++ /dev/null
@@ -1,360 +0,0 @@
-import inspect
-import json
-import re
-from collections.abc import Callable
-from typing import Literal, cast
-
-from langchain_core.messages import AIMessage, ToolMessage
-from langchain_core.tools import StructuredTool
-from langchain_core.tools import tool as create_tool
-from langgraph.checkpoint.base import BaseCheckpointSaver
-from langgraph.graph import START, StateGraph
-from langgraph.types import Command, RetryPolicy
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolConfig, ToolFormat
-
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.codeact0.llm_tool import ai_classify, call_llm, data_extractor, smart_print
-from universal_mcp.agents.codeact0.prompts import (
-    create_default_prompt,
-)
-from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell
-from universal_mcp.agents.codeact0.state import CodeActState
-from universal_mcp.agents.codeact0.tools import (
-    create_meta_tools,
-    enter_playbook_mode,
-    get_valid_tools,
-)
-from universal_mcp.agents.codeact0.utils import inject_context, smart_truncate
-from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
-
-PLAYBOOK_PLANNING_PROMPT = """Now, you are tasked with creating a reusable playbook from the user's previous workflow.
-
-TASK: Analyze the conversation history and code execution to create a step-by-step plan for a reusable function. Do not include the searching and loading of tools. Assume that the tools have already been loaded.
-
-Your plan should:
-1. Identify the key steps in the workflow
-2. Mark user-specific variables that should become the main playbook function parameters using `variable_name` syntax. Intermediate variables should not be highlighted using ``
-3. Keep the logic generic and reusable
-4. Be clear and concise
-
-Example:
-```
-1. Connect to database using `db_connection_string`
-2. Query user data for `user_id`
-3. Process results and calculate `metric_name`
-4. Send notification to `email_address`
-```
-
-Now create a plan based on the conversation history. Enclose it between ``` and ```. Ask the user if the plan is okay."""
-
-
-PLAYBOOK_CONFIRMING_PROMPT = """Now, you are tasked with confirming the playbook plan. Return True if the user is happy with the plan, False otherwise. Do not say anything else in your response. The user response will be the last message in the chain.
-"""
-
-PLAYBOOK_GENERATING_PROMPT = """Now, you are tasked with generating the playbook function. Return the function in Python code.
-Do not include any other text in your response.
-The function should be a single, complete piece of code that can be executed independently, based on previously executed code snippets that executed correctly.
-The parameters of the function should be the same as the final confirmed playbook plan.
-Do not include anything other than python code in your response
-"""
-
-
-class CodeActPlaybookAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        memory: BaseCheckpointSaver | None = None,
-        tools: ToolConfig | None = None,
-        registry: ToolRegistry | None = None,
-        playbook_registry: object | None = None,
-        sandbox_timeout: int = 20,
-        **kwargs,
-    ):
-        super().__init__(
-            name=name,
-            instructions=instructions,
-            model=model,
-            memory=memory,
-            **kwargs,
-        )
-        self.model_instance = load_chat_model(model)
-        self.tools_config = tools or []
-        self.registry = registry
-        self.playbook_registry = playbook_registry
-        self.eval_fn = eval_unsafe
-        self.sandbox_timeout = sandbox_timeout
-        self.processed_tools: list[StructuredTool | Callable] = []
-
-    async def _build_graph(self):
-        meta_tools = create_meta_tools(self.registry)
-        additional_tools = [smart_print, data_extractor, ai_classify, call_llm, meta_tools["web_search"]]
-        self.additional_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in additional_tools]
-
-        async def call_model(state: CodeActState) -> Command[Literal["sandbox", "execute_tools"]]:
-            self.exported_tools = []
-            if self.tools_config:
-                # Convert dict format to list format if needed
-                if isinstance(self.tools_config, dict):
-                    self.tools_config = [
-                        f"{provider}__{tool}" for provider, tools in self.tools_config.items() for tool in tools
-                    ]
-                if not self.registry:
-                    raise ValueError("Tools are configured but no registry is provided")
-                # Langchain tools are fine
-                self.tools_config.extend(state.get("selected_tool_ids", []))
-                self.exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
-            self.final_instructions, self.tools_context = create_default_prompt(
-                self.exported_tools, self.additional_tools, self.instructions
-            )
-            messages = [{"role": "system", "content": self.final_instructions}] + state["messages"]
-
-            # Run the model and potentially loop for reflection
-            model_with_tools = self.model_instance.bind_tools(
-                tools=[
-                    execute_ipython_cell,
-                    enter_playbook_mode,
-                    meta_tools["search_functions"],
-                    meta_tools["load_functions"],
-                ],
-                tool_choice="auto",
-            )
-            response = cast(AIMessage, model_with_tools.invoke(messages))
-            if response.tool_calls:
-                return Command(goto="execute_tools", update={"messages": [response]})
-            else:
-                return Command(update={"messages": [response], "model_with_tools": model_with_tools})
-
-            # if response.tool_calls:
-            #     if len(response.tool_calls) > 1:
-            #         raise Exception("Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')")
-            #     if response.tool_calls[0]["name"] == "enter_playbook_mode":
-            #         return Command(goto="playbook", update = {"playbook_mode": "planning"})
-            #     if response.tool_calls[0]["name"] != "execute_ipython_cell":
-            #         raise Exception(
-            #             f"Unexpected tool call: {response.tool_calls[0]['name']}. Expected 'execute_ipython_cell'."
-            #         )
-            #     if (
-            #         response.tool_calls[0]["args"].get("snippet") is None
-            #         or not response.tool_calls[0]["args"]["snippet"].strip()
-            #     ):
-            #         raise Exception("Tool call 'execute_ipython_cell' requires a non-empty 'snippet' argument.")
-            #     return Command(goto="sandbox", update={"messages": [response]})
-            # else:
-            #     return Command(update={"messages": [response]})
-
-        async def execute_tools(state: CodeActState) -> Command[Literal["call_model", "playbook", "sandbox"]]:
-            """Execute tool calls"""
-            last_message = state["messages"][-1]
-            tool_calls = last_message.tool_calls if isinstance(last_message, AIMessage) else []
-
-            tool_messages = []
-            new_tool_ids = []
-            ask_user = False
-            ai_msg = ""
-            tool_result = ""
-
-            for tool_call in tool_calls:
-                try:
-                    if tool_call["name"] == "enter_playbook_mode":
-                        tool_message = ToolMessage(
-                            content=json.dumps("Entered Playbook Mode."),
-                            name=tool_call["name"],
-                            tool_call_id=tool_call["id"],
-                        )
-                        return Command(
-                            goto="playbook",
-                            update={"playbook_mode": "planning", "messages": [tool_message]},  # Entered Playbook mode
-                        )
-                    elif tool_call["name"] == "execute_ipython_cell":
-                        return Command(goto="sandbox")
-                    elif tool_call["name"] == "load_functions":  # Handle load_functions separately
-                        valid_tools, unconnected_links = await get_valid_tools(
-                            tool_ids=tool_call["args"]["tool_ids"], registry=self.registry
-                        )
-                        new_tool_ids.extend(valid_tools)
-                        # Create tool message response
-                        tool_result = f"Successfully loaded {len(valid_tools)} tools: {valid_tools}"
-                        links = "\n".join(unconnected_links)
-                        if links:
-                            ask_user = True
-                            ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {links} "
-                    elif tool_call["name"] == "search_functions":
-                        tool_result = await meta_tools["search_functions"].ainvoke(tool_call["args"])
-                    else:
-                        raise Exception(
-                            f"Unexpected tool call: {tool_call['name']}. "
-                            "tool calls must be one of 'enter_playbook_mode', 'execute_ipython_cell', 'load_functions', or 'search_functions'"
-                        )
-                except Exception as e:
-                    tool_result = str(e)
-
-                tool_message = ToolMessage(
-                    content=json.dumps(tool_result),
-                    name=tool_call["name"],
-                    tool_call_id=tool_call["id"],
-                )
-                tool_messages.append(tool_message)
-
-            if new_tool_ids:
-                self.tools_config.extend(new_tool_ids)
-                self.exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
-                self.final_instructions, self.tools_context = create_default_prompt(
-                    self.exported_tools, self.additional_tools, self.instructions
-                )
-
-            if ask_user:
-                tool_messages.append(AIMessage(content=ai_msg))
-                return Command(update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})
-
-            return Command(goto="call_model", update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})
-
-        # If eval_fn is a async, we define async node function.
-        if inspect.iscoroutinefunction(self.eval_fn):
-            raise ValueError("eval_fn must be a synchronous function, not a coroutine.")
-        # async def sandbox(state: StateSchema):
-        #     existing_context = state.get("context", {})
-        #     context = {**existing_context, **tools_context}
-        #     # Execute the script in the sandbox
-        #     output, new_vars = await eval_fn(state["script"], context)
-        #     new_context = {**existing_context, **new_vars}
-        #     return {
-        #         "messages": [{"role": "user", "content": output}],
-        #         "context": new_context,
-        #     }
-        else:
-
-            def sandbox(state: CodeActState) -> Command[Literal["call_model"]]:
-                tool_call = state["messages"][-1].tool_calls[0]  # type: ignore
-                code = tool_call["args"]["snippet"]
-                previous_add_context = state.get("add_context", {})
-                add_context = inject_context(previous_add_context, self.tools_context)
-                existing_context = state.get("context", {})
-                context = {**existing_context, **add_context}
-                # Execute the script in the sandbox
-
-                output, new_context, new_add_context = self.eval_fn(
-                    code, context, previous_add_context, 180
-                )  # default timeout 3 min
-                output = smart_truncate(output)
-
-                return Command(
-                    goto="call_model",
-                    update={
-                        "messages": [
-                            ToolMessage(
-                                content=output,
-                                name=tool_call["name"],
-                                tool_call_id=tool_call["id"],
-                            )
-                        ],
-                        "context": new_context,
-                        "add_context": new_add_context,
-                    },
-                )
-
-        def playbook(state: CodeActState) -> Command[Literal["call_model"]]:
-            playbook_mode = state.get("playbook_mode")
-            if playbook_mode == "planning":
-                planning_instructions = self.instructions + PLAYBOOK_PLANNING_PROMPT
-                messages = [{"role": "system", "content": planning_instructions}] + state["messages"]
-
-                response = self.model_instance.invoke(messages)
-                response = cast(AIMessage, response)
-                response_text = get_message_text(response)
-                # Extract plan from response text between triple backticks
-                plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
-                if plan_match:
-                    plan = plan_match.group(1).strip()
-                else:
-                    plan = response_text.strip()
-                return Command(update={"messages": [response], "playbook_mode": "confirming", "plan": plan})
-
-            elif playbook_mode == "confirming":
-                confirmation_instructions = self.instructions + PLAYBOOK_CONFIRMING_PROMPT
-                messages = [{"role": "system", "content": confirmation_instructions}] + state["messages"]
-                response = self.model_instance.invoke(messages, stream=False)
-                response = get_message_text(response)
-                if "true" in response.lower():
-                    return Command(goto="playbook", update={"playbook_mode": "generating"})
-                else:
-                    return Command(goto="playbook", update={"playbook_mode": "planning"})
-
-            elif playbook_mode == "generating":
-                generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
-                messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
-                response = cast(AIMessage, self.model_instance.invoke(messages))
-                raw_content = get_message_text(response)
-                func_code = raw_content.strip()
-                func_code = func_code.replace("```python", "").replace("```", "")
-                func_code = func_code.strip()
-
-                # Extract function name (handle both regular and async functions)
-                match = re.search(r"^\s*(?:async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", func_code, re.MULTILINE)
-                if match:
-                    function_name = match.group(1)
-                else:
-                    function_name = "generated_playbook"
-
-                # Save or update an Agent using the helper registry
-                saved_note = ""
-                try:
-                    if not self.playbook_registry:
-                        raise ValueError("Playbook registry is not configured")
-
-                    # Build instructions payload embedding the plan and function code
-                    instructions_payload = {
-                        "playbookPlan": state["plan"],
-                        "playbookScript": {
-                            "name": function_name,
-                            "code": func_code,
-                        },
-                    }
-
-                    # Convert tool ids list to dict
-                    tool_dict = convert_tool_ids_to_dict(state["selected_tool_ids"])
-
-                    res = self.playbook_registry.create_agent(
-                        name=function_name,
-                        description=f"Generated playbook: {function_name}",
-                        instructions=instructions_payload,
-                        tools=tool_dict,
-                        visibility="private",
-                    )
-                    saved_note = f"Successfully created your playbook! Check it out here: [View Playbook](https://wingmen.info/agents/{res.id})"
-                except Exception as e:
-                    saved_note = f"Failed to save generated playbook as Agent '{function_name}': {e}"
-
-                # Mock tool call for exit_playbook_mode (for testing/demonstration)
-                mock_exit_tool_call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
-                mock_assistant_message = AIMessage(content=saved_note, tool_calls=[mock_exit_tool_call])
-
-                # Mock tool response for exit_playbook_mode
-                mock_exit_tool_response = ToolMessage(
-                    content=json.dumps(f"Exited Playbook Mode.{saved_note}"),
-                    name="exit_playbook_mode",
-                    tool_call_id="mock_exit_playbook_123",
-                )
-
-                return Command(
-                    update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"}
-                )
-
-        def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
-            """Route to either normal mode or playbook creation"""
-            if state.get("playbook_mode") in ["planning", "confirming", "generating"]:
-                return "playbook"
-
-            return "call_model"
-
-        agent = StateGraph(state_schema=CodeActState)
-        agent.add_node(call_model, retry_policy=RetryPolicy(max_attempts=3, retry_on=filter_retry_on))
-        agent.add_node(sandbox)
-        agent.add_node(playbook)
-        agent.add_node(execute_tools)
-        agent.add_conditional_edges(START, route_entry)
-        # agent.add_edge(START, "call_model")
-        return agent.compile(checkpointer=self.memory)
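For readers skimming the removed `playbook_agent.py`: its `playbook` node extracts the confirmed plan by taking whatever sits between the first pair of triple backticks in the model's reply, falling back to the whole reply otherwise. Below is a minimal, self-contained sketch of just that step; the regex and fallback mirror the `planning` branch in the diff above, while the sample response text is invented purely for illustration.

import re

# Invented model reply for illustration; in the removed code this text comes from
# self.model_instance.invoke(...) inside the "planning" branch.
response_text = '''Here is the plan:
```
1. Query user data for `user_id`
2. Send notification to `email_address`
```
Is this okay?'''

# Same extraction as the removed code: prefer the fenced section, otherwise use the whole reply.
plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
plan = plan_match.group(1).strip() if plan_match else response_text.strip()
print(plan)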