universal-mcp-agents 0.1.19rc1__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. universal_mcp/agents/__init__.py +15 -16
  2. universal_mcp/agents/base.py +46 -35
  3. universal_mcp/agents/bigtool/state.py +1 -1
  4. universal_mcp/agents/cli.py +2 -5
  5. universal_mcp/agents/codeact0/__init__.py +2 -3
  6. universal_mcp/agents/codeact0/__main__.py +4 -7
  7. universal_mcp/agents/codeact0/agent.py +444 -96
  8. universal_mcp/agents/codeact0/langgraph_agent.py +1 -1
  9. universal_mcp/agents/codeact0/llm_tool.py +2 -254
  10. universal_mcp/agents/codeact0/prompts.py +247 -137
  11. universal_mcp/agents/codeact0/sandbox.py +52 -18
  12. universal_mcp/agents/codeact0/state.py +26 -6
  13. universal_mcp/agents/codeact0/tools.py +400 -74
  14. universal_mcp/agents/codeact0/utils.py +175 -11
  15. universal_mcp/agents/codeact00/__init__.py +3 -0
  16. universal_mcp/agents/{unified → codeact00}/__main__.py +4 -6
  17. universal_mcp/agents/codeact00/agent.py +578 -0
  18. universal_mcp/agents/codeact00/config.py +77 -0
  19. universal_mcp/agents/{unified → codeact00}/langgraph_agent.py +2 -2
  20. universal_mcp/agents/{unified → codeact00}/llm_tool.py +1 -1
  21. universal_mcp/agents/codeact00/prompts.py +364 -0
  22. universal_mcp/agents/{unified → codeact00}/sandbox.py +52 -18
  23. universal_mcp/agents/codeact00/state.py +66 -0
  24. universal_mcp/agents/codeact00/tools.py +525 -0
  25. universal_mcp/agents/codeact00/utils.py +678 -0
  26. universal_mcp/agents/codeact01/__init__.py +3 -0
  27. universal_mcp/agents/{codeact → codeact01}/__main__.py +4 -11
  28. universal_mcp/agents/codeact01/agent.py +413 -0
  29. universal_mcp/agents/codeact01/config.py +77 -0
  30. universal_mcp/agents/codeact01/langgraph_agent.py +14 -0
  31. universal_mcp/agents/codeact01/llm_tool.py +25 -0
  32. universal_mcp/agents/codeact01/prompts.py +246 -0
  33. universal_mcp/agents/codeact01/sandbox.py +162 -0
  34. universal_mcp/agents/{unified → codeact01}/state.py +26 -10
  35. universal_mcp/agents/codeact01/tools.py +648 -0
  36. universal_mcp/agents/{unified → codeact01}/utils.py +175 -11
  37. universal_mcp/agents/llm.py +14 -4
  38. universal_mcp/agents/react.py +3 -3
  39. universal_mcp/agents/sandbox.py +124 -69
  40. universal_mcp/applications/llm/app.py +76 -24
  41. {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/METADATA +6 -5
  42. universal_mcp_agents-0.1.24rc3.dist-info/RECORD +66 -0
  43. universal_mcp/agents/codeact/__init__.py +0 -3
  44. universal_mcp/agents/codeact/agent.py +0 -240
  45. universal_mcp/agents/codeact/models.py +0 -11
  46. universal_mcp/agents/codeact/prompts.py +0 -82
  47. universal_mcp/agents/codeact/sandbox.py +0 -85
  48. universal_mcp/agents/codeact/state.py +0 -11
  49. universal_mcp/agents/codeact/utils.py +0 -68
  50. universal_mcp/agents/codeact0/playbook_agent.py +0 -355
  51. universal_mcp/agents/unified/README.md +0 -45
  52. universal_mcp/agents/unified/__init__.py +0 -3
  53. universal_mcp/agents/unified/agent.py +0 -289
  54. universal_mcp/agents/unified/prompts.py +0 -192
  55. universal_mcp/agents/unified/tools.py +0 -188
  56. universal_mcp_agents-0.1.19rc1.dist-info/RECORD +0 -64
  57. {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/WHEEL +0 -0
@@ -1,289 +0,0 @@
1
- import json
2
- import re
3
- from typing import Literal, cast
4
-
5
- from langchain_core.messages import AIMessage, ToolMessage
6
- from langchain_core.tools import StructuredTool
7
- from langchain_core.tools import tool as create_tool
8
- from langgraph.checkpoint.base import BaseCheckpointSaver
9
- from langgraph.graph import START, StateGraph
10
- from langgraph.types import Command, RetryPolicy
11
- from loguru import logger
12
- from universal_mcp.tools.registry import ToolRegistry
13
- from universal_mcp.types import ToolConfig, ToolFormat
14
-
15
- from universal_mcp.agents.base import BaseAgent
16
- from universal_mcp.agents.llm import load_chat_model
17
- from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
18
-
19
- from .llm_tool import smart_print
20
- from .prompts import (
21
- PLAYBOOK_CONFIRMING_PROMPT,
22
- PLAYBOOK_GENERATING_PROMPT,
23
- PLAYBOOK_PLANNING_PROMPT,
24
- create_default_prompt,
25
- )
26
- from .sandbox import eval_unsafe
27
- from .state import CodeActState
28
- from .tools import create_meta_tools, enter_playbook_mode, get_valid_tools
29
- from .utils import inject_context, smart_truncate
30
-
31
-
32
class UnifiedAgent(BaseAgent):
    """LangGraph agent that answers requests by having the model write Python
    snippets, executing them in a sandbox with persistent context, and — on
    request — saving a completed workflow as a reusable "playbook" (an Agent
    record created through ``playbook_registry``).
    """

    def __init__(
        self,
        name: str,
        instructions: str,
        model: str,
        memory: BaseCheckpointSaver | None = None,
        tools: ToolConfig | None = None,
        registry: ToolRegistry | None = None,
        playbook_registry: object | None = None,
        sandbox_timeout: int = 20,
        **kwargs,
    ):
        """Configure the agent.

        Args:
            name: Agent name (forwarded to BaseAgent).
            instructions: Base system instructions prepended to every prompt.
            model: Chat model identifier resolved via ``load_chat_model``.
            memory: Optional LangGraph checkpointer used at compile time.
            tools: Tool configuration; requires ``registry`` when non-empty.
            registry: Tool registry used to search/export/load tools.
            playbook_registry: Object with ``create_agent(...)`` used to save
                generated playbooks.
            sandbox_timeout: Intended sandbox timeout in seconds.
        """
        super().__init__(
            name=name,
            instructions=instructions,
            model=model,
            memory=memory,
            **kwargs,
        )
        self.model_instance = load_chat_model(model)
        self.tools_config = tools or {}
        self.registry = registry
        self.playbook_registry = playbook_registry
        # NOTE(review): sandbox_timeout is stored but never used below — the
        # sandbox node passes a hard-coded 180 to eval_fn. Confirm intent.
        self.sandbox_timeout = sandbox_timeout
        self.eval_fn = eval_unsafe
        if self.tools_config and not self.registry:
            raise ValueError("Registry must be provided with tools")

    async def _build_graph(self):  # noqa: PLR0915
        """Build and compile the LangGraph state machine.

        Nodes: ``call_model`` (LLM turn), ``sandbox`` (code execution),
        ``execute_tools`` (meta-tool calls), ``playbook`` (plan/confirm/
        generate flow). Entry is routed by ``route_entry``.
        """
        meta_tools = create_meta_tools(self.registry)
        additional_tools = [smart_print, meta_tools["web_search"]]
        # Normalize plain callables into LangChain StructuredTool objects.
        self.additional_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in additional_tools]
        self.default_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)

        async def call_model(state: CodeActState) -> Command[Literal["sandbox", "execute_tools"]]:
            # Re-export the tools the model has loaded so far this session.
            self.exported_tools = []

            selected_tool_ids = state.get("selected_tool_ids", [])
            self.exported_tools = await self.registry.export_tools(selected_tool_ids, ToolFormat.LANGCHAIN)
            all_tools = self.exported_tools + self.additional_tools
            self.final_instructions, self.tools_context = create_default_prompt(all_tools, self.instructions)
            # NOTE(review): instructions are sent with role "user", not
            # "system" — presumably deliberate for this model; confirm.
            messages = [{"role": "user", "content": self.final_instructions}] + state["messages"]

            if state.get("output"):
                # Feed the previous sandbox output back to the model.
                messages.append(
                    {
                        "role": "system",
                        "content": f"The last code execution resulted in this output:\n{state['output']}",
                    }
                )

            # Run the model and potentially loop for reflection
            model_with_tools = self.model_instance.bind_tools(
                tools=[
                    enter_playbook_mode,
                    meta_tools["search_functions"],
                    meta_tools["load_functions"],
                ],
                tool_choice="auto",
            )
            response = cast(AIMessage, model_with_tools.invoke(messages))
            response_text = get_message_text(response)
            # A fenced ```python block in the reply means "run this code".
            code_match = re.search(r"```python\n(.*?)\n```", response_text, re.DOTALL)

            if code_match:
                code = code_match.group(1).strip()
                return Command(goto="sandbox", update={"messages": [response], "code": code, "output": ""})
            elif response.tool_calls:
                return Command(goto="execute_tools", update={"messages": [response]})
            else:
                # Plain answer: end the turn and wait for the user.
                return Command(update={"messages": [response]})

        async def execute_tools(state: CodeActState) -> Command[Literal["call_model", "playbook", "sandbox"]]:
            """Execute tool calls"""
            last_message = state["messages"][-1]
            tool_calls = last_message.tool_calls if isinstance(last_message, AIMessage) else []

            tool_messages = []
            new_tool_ids = []
            ask_user = False
            ai_msg = ""
            tool_result = ""

            for tool_call in tool_calls:
                try:
                    if tool_call["name"] == "enter_playbook_mode":
                        # Short-circuits: any remaining tool calls are dropped.
                        tool_message = ToolMessage(
                            content=json.dumps("Entered Playbook Mode."),
                            name=tool_call["name"],
                            tool_call_id=tool_call["id"],
                        )
                        return Command(
                            goto="playbook",
                            update={"playbook_mode": "planning", "messages": [tool_message]},  # Entered Playbook mode
                        )
                    elif tool_call["name"] == "load_functions":  # Handle load_functions separately
                        valid_tools, unconnected_links = await get_valid_tools(
                            tool_ids=tool_call["args"]["tool_ids"], registry=self.registry
                        )
                        new_tool_ids.extend(valid_tools)
                        # Create tool message response
                        tool_result = f"Successfully loaded {len(valid_tools)} tools: {valid_tools}"
                        links = "\n".join(unconnected_links)
                        if links:
                            # Some apps need auth: pause and ask the user.
                            ask_user = True
                            ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {links} "
                    elif tool_call["name"] == "search_functions":
                        tool_result = await meta_tools["search_functions"].ainvoke(tool_call["args"])
                except Exception as e:
                    # Surface the failure to the model instead of crashing.
                    tool_result = f"Error during {tool_call}: {e}"

                tool_message = ToolMessage(
                    content=json.dumps(tool_result),
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"],
                )
                tool_messages.append(tool_message)

            if ask_user:
                # No goto: the graph turn ends so the user can respond.
                tool_messages.append(AIMessage(content=ai_msg))
                return Command(update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})

            return Command(goto="call_model", update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})

        def sandbox(state: CodeActState) -> Command[Literal["call_model"]]:
            # Execute the pending code snippet, carrying variables forward.
            code = state.get("code")

            if not code:
                logger.error("Sandbox called without code")
                return Command(
                    goto="call_model",
                    update={"output": "Sandbox was called without any code to execute."},
                )

            previous_add_context = state.get("add_context", {})
            add_context = inject_context(previous_add_context, self.tools_context)
            existing_context = state.get("context", {})
            context = {**existing_context, **add_context}
            # Execute the script in the sandbox

            # NOTE(review): timeout is hard-coded; self.sandbox_timeout ignored.
            output, new_context, new_add_context = self.eval_fn(
                code, context, previous_add_context, 180
            )  # default timeout 3 min
            output = smart_truncate(output)

            return Command(
                goto="call_model",
                update={
                    "output": output,
                    "code": "",
                    "context": new_context,
                    "add_context": new_add_context,
                },
            )

        def playbook(state: CodeActState) -> Command[Literal["call_model"]]:
            # Three-phase flow: planning -> confirming -> generating.
            playbook_mode = state.get("playbook_mode")
            if playbook_mode == "planning":
                planning_instructions = self.instructions + PLAYBOOK_PLANNING_PROMPT
                messages = [{"role": "system", "content": planning_instructions}] + state["messages"]

                response = self.model_instance.invoke(messages)
                response = cast(AIMessage, response)
                response_text = get_message_text(response)
                # Extract plan from response text between triple backticks
                plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
                if plan_match:
                    plan = plan_match.group(1).strip()
                else:
                    # Fall back to the whole reply if no fenced block found.
                    plan = response_text.strip()
                return Command(update={"messages": [response], "playbook_mode": "confirming", "plan": plan})

            elif playbook_mode == "confirming":
                confirmation_instructions = self.instructions + PLAYBOOK_CONFIRMING_PROMPT
                messages = [{"role": "system", "content": confirmation_instructions}] + state["messages"]
                response = self.model_instance.invoke(messages, stream=False)
                response = get_message_text(response)
                # Model is prompted to answer strictly True/False.
                if "true" in response.lower():
                    return Command(goto="playbook", update={"playbook_mode": "generating"})
                else:
                    return Command(goto="playbook", update={"playbook_mode": "planning"})

            elif playbook_mode == "generating":
                generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
                messages = [{"role": "system", "content": generating_instructions}] + state["messages"]
                response = cast(AIMessage, self.model_instance.invoke(messages))
                raw_content = get_message_text(response)
                # Strip any markdown code fencing around the function source.
                func_code = raw_content.strip()
                func_code = func_code.replace("```python", "").replace("```", "")
                func_code = func_code.strip()

                # Extract function name (handle both regular and async functions)
                match = re.search(r"^\s*(?:async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(", func_code, re.MULTILINE)
                if match:
                    function_name = match.group(1)
                else:
                    function_name = "generated_playbook"

                # Save or update an Agent using the helper registry
                saved_note = ""
                try:
                    if not self.playbook_registry:
                        raise ValueError("Playbook registry is not configured")

                    # Build instructions payload embedding the plan and function code
                    instructions_payload = {
                        "playbookPlan": state["plan"],
                        "playbookScript": {
                            "name": function_name,
                            "code": func_code,
                        },
                    }

                    # Convert tool ids list to dict
                    tool_dict = convert_tool_ids_to_dict(state["selected_tool_ids"])

                    res = self.playbook_registry.create_agent(
                        name=function_name,
                        description=f"Generated playbook: {function_name}",
                        instructions=instructions_payload,
                        tools=tool_dict,
                        visibility="private",
                    )
                    saved_note = f"Successfully created your playbook! Check it out here: [View Playbook](https://wingmen.info/agents/{res.id})"
                except Exception as e:
                    saved_note = f"Failed to save generated playbook as Agent '{function_name}': {e}"

                # Mock tool call for exit_playbook_mode (for testing/demonstration)
                mock_exit_tool_call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
                mock_assistant_message = AIMessage(content=saved_note, tool_calls=[mock_exit_tool_call])

                # Mock tool response for exit_playbook_mode
                mock_exit_tool_response = ToolMessage(
                    content=json.dumps(f"Exited Playbook Mode.{saved_note}"),
                    name="exit_playbook_mode",
                    tool_call_id="mock_exit_playbook_123",
                )

                return Command(
                    update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"}
                )
            # NOTE(review): any other playbook_mode falls through and returns
            # None implicitly — confirm this is unreachable by construction.

        def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
            """Route to either normal mode or playbook creation"""
            if state.get("playbook_mode") in ["planning", "confirming", "generating"]:
                return "playbook"

            return "call_model"

        agent = StateGraph(state_schema=CodeActState)
        # Retry the model node on transient errors only (see filter_retry_on).
        agent.add_node(call_model, retry_policy=RetryPolicy(max_attempts=3, retry_on=filter_retry_on))
        agent.add_node(sandbox)
        agent.add_node(playbook)
        agent.add_node(execute_tools)
        agent.add_conditional_edges(START, route_entry)
        # agent.add_edge(START, "call_model")
        return agent.compile(checkpointer=self.memory)
@@ -1,192 +0,0 @@
1
- import inspect
2
- import re
3
- from collections.abc import Sequence
4
-
5
- from langchain_core.tools import StructuredTool
6
-
7
# Fixed persona/system preamble for the agent. It is consumed by
# create_default_prompt() below, which appends the available tool signatures
# and the per-agent task instructions. Do not edit casually: this text is
# model-facing behavior, not documentation.
uneditable_prompt = """
You are **Wingmen**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.

Your job is to answer the user's question or perform the task they ask for.
- Answer simple questions (which do not require you to write any code or access any external resources) directly. Note that any operation that involves using ONLY print functions should be answered directly.
- For tasks requiring operations or access to external resources, you should achieve the task by writing Python code snippets inside markdown blocks (e.g., ```python ... ```).
- You also have access to two tools for finding and loading more python functions- `search_functions` and `load_functions`, which you must use for finding functions for using different external applications. Prefer pre-loaded or functions already available when possible, and prioritize connected applications over unconnected ones. When this is not enough to break a tie between similar applications, ask the user.
- In writing or natural language processing tasks DO NOT answer directly. Instead write python code using the AI functions provided to you for tasks like summarizing, text generation, classification, data extraction from text or unstructured data, etc. Avoid hardcoded approaches to classification, data extraction.
- The code you write will be executed in a sandbox environment, and you can use the output of previous executions in your code. variables, functions, imports are retained.
- Read and understand the output of the previous code snippet and use it to answer the user's request. Note that the code output is NOT visible to the user, so after the task is complete, you have to give the output to the user in a markdown format.
- If needed, feel free to ask for more information from the user to clarify the task.

GUIDELINES for writing code:
- Variables defined at the top level of previous code snippets can be referenced in your code.
- External functions which return a dict or list[dict] are ambiguous. Therefore, you MUST explore the structure of the returned data using `smart_print()` statements before using it, printing keys and values. `smart_print` truncates long strings from data, preventing huge output logs.
- When an operation involves running a fixed set of steps on a list of items, run one run correctly and then use a for loop to run the steps on each item in the list.
- In a single code snippet, try to achieve as much as possible.
- You can only import libraries that come pre-installed with Python. For external functions, use the search and load tools to access them in the code.
- For displaying final results to the user, you must present your output in markdown format, including image links, so that they are rendered and displayed to the user. The code output is NOT visible to the user.
- Call all functions using keyword arguments only, never positional arguments.
- Async Functions (Critical): Use them only as follows-
    Case 1: Top-level await without asyncio.run()
    Wrap in async function and call with asyncio.run():
    async def main():
        result = await some_async_function()
        return result
    asyncio.run(main())
    Case 2: Using asyncio.run() directly
    If code already contains asyncio.run(), use as-is — do not wrap again:
    asyncio.run(some_async_function())
    Rules:
    - Never use await outside an async function
    - Never use await asyncio.run()
    - Never nest asyncio.run() calls
"""
42
-
43
-
44
def make_safe_function_name(name: str) -> str:
    """Convert a tool name to a valid Python function name."""
    # Any character outside [a-zA-Z0-9_] becomes an underscore.
    cleaned = re.sub(r"[^a-zA-Z0-9_]", "_", name)
    # Empty input maps to a placeholder identifier.
    if not cleaned:
        return "unnamed_tool"
    # Identifiers cannot begin with a digit; prefix them.
    if cleaned[0].isdigit():
        return f"tool_{cleaned}"
    return cleaned
55
-
56
-
57
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    This can be used to make triple-quoted strings line up with the left
    edge of the display, while still presenting them in the source code
    in indented form.

    Note that tabs and spaces are both treated as whitespace, but they
    are not equal: the lines "  hello" and "\\thello" are
    considered to have no common leading whitespace.

    Entirely blank lines are normalized to a newline character.
    """
    # The original body was a verbatim copy of textwrap.dedent's source
    # (including dead `if 0` debug code); delegate to the stdlib instead.
    import textwrap

    return textwrap.dedent(text)
107
-
108
-
109
def indent(text, prefix, predicate=None):
    """Adds 'prefix' to the beginning of selected lines in 'text'.

    If 'predicate' is provided, 'prefix' will only be added to the lines
    where 'predicate(line)' is True. If 'predicate' is not provided,
    it will default to adding 'prefix' to all non-empty lines that do not
    consist solely of whitespace characters.
    """
    # The original body was a verbatim copy of textwrap.indent's source;
    # delegate to the stdlib implementation.
    import textwrap

    return textwrap.indent(text, prefix, predicate)
132
-
133
-
134
def create_default_prompt(
    tools: Sequence[StructuredTool],
    base_prompt: str | None = None,
):
    """Build the system prompt advertising tool signatures, plus a context dict.

    Args:
        tools: Tools to advertise; each is expected to expose a sync ``.func``
            or an async ``.coroutine`` callable.
        base_prompt: Optional task description appended to the prompt.

    Returns:
        Tuple of (system prompt string, dict mapping sanitized tool name to
        the tool's underlying callable, for injection into the sandbox).
    """
    system_prompt = uneditable_prompt.strip() + (
        "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n"
    )
    tools_context = {}

    for tool in tools:
        if hasattr(tool, "func") and tool.func is not None:
            tool_callable = tool.func
            is_async = False
        elif hasattr(tool, "coroutine") and tool.coroutine is not None:
            tool_callable = tool.coroutine
            is_async = True
        else:
            # Fix: previously a tool with neither .func nor .coroutine raised
            # NameError on the first iteration, or — worse — silently reused
            # the previous tool's callable on later iterations. Skip it.
            continue
        # Render a stub signature with the docstring so the model knows the API.
        system_prompt += f'''{"async " if is_async else ""}def {tool.name} {str(inspect.signature(tool_callable))}:
    """{tool.description}"""
    ...
'''
        # NOTE(review): the prompt shows tool.name while the sandbox context is
        # keyed by the sanitized name; these differ when the name contains
        # non-identifier characters — confirm upstream names are already safe.
        safe_name = make_safe_function_name(tool.name)
        tools_context[safe_name] = tool_callable

    if base_prompt and base_prompt.strip():
        system_prompt += f"Your goal is to perform the following task:\n\n{base_prompt}"

    return system_prompt, tools_context
161
-
162
-
163
# Prompt for the "planning" phase of the playbook node: asks the model to
# distill the conversation into a parameterized step plan fenced in ```.
PLAYBOOK_PLANNING_PROMPT = """Now, you are tasked with creating a reusable playbook from the user's previous workflow.

TASK: Analyze the conversation history and code execution to create a step-by-step plan for a reusable function. Do not include the searching and loading of tools. Assume that the tools have already been loaded.

Your plan should:
1. Identify the key steps in the workflow
2. Mark user-specific variables that should become the main playbook function parameters using `variable_name` syntax. Intermediate variables should not be highlighted using ``
3. Keep the logic generic and reusable
4. Be clear and concise

Example:
```
1. Connect to database using `db_connection_string`
2. Query user data for `user_id`
3. Process results and calculate `metric_name`
4. Send notification to `email_address`
```

Now create a plan based on the conversation history. Enclose it between ``` and ```. Ask the user if the plan is okay."""


# Prompt for the "confirming" phase: the caller checks for "true" in the
# lowercased reply to decide whether to proceed to generation.
PLAYBOOK_CONFIRMING_PROMPT = """Now, you are tasked with confirming the playbook plan. Return True if the user is happy with the plan, False otherwise. Do not say anything else in your response. The user response will be the last message in the chain.
"""

# Prompt for the "generating" phase: requests a single standalone Python
# function whose parameters match the confirmed plan.
PLAYBOOK_GENERATING_PROMPT = """Now, you are tasked with generating the playbook function. Return the function in Python code.
Do not include any other text in your response.
The function should be a single, complete piece of code that can be executed independently, based on previously executed code snippets that executed correctly.
The parameters of the function should be the same as the final confirmed playbook plan.
Do not include anything other than python code in your response
"""
@@ -1,188 +0,0 @@
1
- import asyncio
2
- from collections import defaultdict
3
- from typing import Any
4
-
5
- from langchain_core.tools import tool
6
- from universal_mcp.tools.registry import ToolRegistry
7
- from universal_mcp.types import ToolFormat
8
-
9
# NOTE(review): name is misspelled ("LENGHT") and this constant is not
# referenced anywhere in this file (search_functions defines its own local
# MAX_LENGTH = 20). Kept unchanged in case external code imports it by name.
MAX_LENGHT = 100
10
-
11
-
12
def enter_playbook_mode():
    """Call this function to enter playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
    # Marker tool: its invocation (not its return value) drives graph routing.
    return None
15
-
16
-
17
def exit_playbook_mode():
    """Call this function to exit playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
    # Marker tool: its invocation (not its return value) drives graph routing.
    return None
20
-
21
-
22
def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
    """Create the meta tools for searching and loading tools.

    Returns a dict with three LangChain tools closing over ``tool_registry``:
    ``search_functions``, ``load_functions`` and ``web_search``.
    """

    @tool
    async def search_functions(queries: list[str]) -> str:
        """Search for relevant functions given list of queries.
        Each single query should be atomic (doable with a single function).
        For tasks requiring multiple functions, add separate queries for each subtask"""
        try:
            # Fetch all connections
            connections = await tool_registry.list_connected_apps()
            connected_apps = {connection["app_id"] for connection in connections}

            app_tools = defaultdict(set)
            # Per-app cap on the number of listed tools.
            MAX_LENGTH = 20

            # Process all queries concurrently
            search_tasks = []
            for query in queries:
                search_tasks.append(_search_query_tools(query))

            query_results = await asyncio.gather(*search_tasks)

            # Aggregate results with limit per app and automatic deduplication
            for tools_list in query_results:
                for tool in tools_list:
                    # Tool ids look like "app__function"; group by app.
                    app = tool["id"].split("__")[0]
                    tool_id = tool["id"]

                    # Check if within limit and add to set (automatically deduplicates)
                    if len(app_tools[app]) < MAX_LENGTH:
                        # Drop any trailing "Context:" boilerplate from descriptions.
                        cleaned_desc = tool["description"].split("Context:")[0].strip()
                        app_tools[app].add(f"{tool_id}: {cleaned_desc}")

            # Build result string efficiently
            result_parts = []
            for app, tools in app_tools.items():
                app_status = "connected" if app in connected_apps else "NOT connected"
                result_parts.append(f"Tools from {app} (status: {app_status} by user):")
                # Convert set to sorted list for consistent output
                for tool in sorted(tools):
                    result_parts.append(f" - {tool}")
                result_parts.append("")  # Empty line between apps

            result_parts.append("Call load_functions to select the required functions only.")
            return "\n".join(result_parts)

        except Exception as e:
            # Report failures to the model as text rather than raising.
            return f"Error: {e}"

    async def _search_query_tools(query: str) -> list[dict]:
        """Helper function to search apps and tools for a single query."""
        # Start both searches concurrently
        tools_search_task = tool_registry.search_tools(query, limit=10)
        apps_search_task = tool_registry.search_apps(query, limit=4)

        # Wait for both to complete
        tools_from_general_search, apps_list = await asyncio.gather(tools_search_task, apps_search_task)

        # Create tasks for searching tools from each app
        app_tool_tasks = [tool_registry.search_tools(query, limit=5, app_id=app["id"]) for app in apps_list]

        # Wait for all app-specific tool searches to complete
        app_tools_results = await asyncio.gather(*app_tool_tasks)

        # Combine all results
        tools_list = list(tools_from_general_search)
        for app_tools in app_tools_results:
            tools_list.extend(app_tools)

        return tools_list

    @tool
    async def load_functions(tool_ids: list[str]) -> str:
        """Load specific functions by their IDs for use in subsequent steps.

        Args:
            tool_ids: Function ids in the form 'app__function'. Example: 'google_mail__send_email'

        Returns:
            Confirmation message about loaded functions
        """
        # NOTE(review): this tool only echoes; actual validation/loading is done
        # by the agent's execute_tools node via get_valid_tools().
        return f"Successfully loaded {len(tool_ids)} functions: {tool_ids}"

    @tool
    async def web_search(query: str) -> dict:
        """
        Get an LLM answer to a question informed by Exa search results.

        This tool performs an Exa `/answer` request, which:
        1. Provides a **direct answer** for factual queries (e.g., "What is the capital of France?" → "Paris")
        2. Generates a **summary with citations** for open-ended questions
           (e.g., "What is the state of AI in healthcare?" → A detailed summary with source links)

        Args:
            query (str): The question or topic to answer.
        Returns:
            dict: A structured response containing only:
                - answer (str): Generated answer
                - citations (list[dict]): List of cited sources
        """
        # Ensure the Exa tool is exported before calling it.
        await tool_registry.export_tools(["exa__answer"], ToolFormat.LANGCHAIN)
        response = await tool_registry.call_tool("exa__answer", {"query": query, "text": True})

        # Extract only desired fields
        return {
            "answer": response.get("answer"),
            "citations": response.get("citations", []),
        }

    return {"search_functions": search_functions, "load_functions": load_functions, "web_search": web_search}
133
-
134
-
135
async def get_valid_tools(tool_ids: list[str], registry: ToolRegistry) -> tuple[list[str], list[str]]:
    """For a given list of tool_ids, validates the tools and returns a list of links for the apps that have not been logged in.

    Args:
        tool_ids: Candidate ids in the form 'app__function'.
        registry: Registry used to list apps, tools and connections.

    Returns:
        Tuple of (valid tool ids, markdown authorization links for apps the
        user has not connected yet). Invalid ids are silently dropped.
    """
    correct, incorrect = [], []
    connections = await registry.list_connected_apps()
    connected_apps = {connection["app_id"] for connection in connections}
    unconnected = set()
    unconnected_links = []
    app_tool_list: dict[str, set[str]] = {}

    # Group tool_ids by app for fewer registry calls
    app_to_tools: dict[str, list[tuple[str, str]]] = {}
    for tool_id in tool_ids:
        # Ids without the 'app__function' separator are malformed.
        if "__" not in tool_id:
            incorrect.append(tool_id)
            continue
        app, tool_name = tool_id.split("__", 1)
        app_to_tools.setdefault(app, []).append((tool_id, tool_name))

    # Fetch all apps concurrently
    async def fetch_tools(app: str):
        try:
            tools_dict = await registry.list_tools(app)
            return app, {tool_unit["name"] for tool_unit in tools_dict}
        except Exception:
            # Unknown/unreachable app: marks all its tool ids invalid below.
            return app, None

    results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))

    # Build map of available tools per app
    for app, tools in results:
        if tools is not None:
            app_tool_list[app] = tools

    # Validate tool_ids
    for app, tool_entries in app_to_tools.items():
        available = app_tool_list.get(app)
        if available is None:
            incorrect.extend(tool_id for tool_id, _ in tool_entries)
            continue
        if app not in connected_apps and app not in unconnected:
            unconnected.add(app)
            # NOTE(review): fragile parsing — assumes the authorization message
            # is "...: <url>. R..." and extracts the substring between the
            # first ':' and '. R'. Confirm against get_authorization_url().
            text = registry.client.get_authorization_url(app)
            start = text.find(":") + 1
            end = text.find(". R", start)
            url = text[start:end].strip()
            markdown_link = f"[{app}]({url})"
            unconnected_links.append(markdown_link)
        for tool_id, tool_name in tool_entries:
            if tool_name in available:
                correct.append(tool_id)
            else:
                incorrect.append(tool_id)

    return correct, unconnected_links