universal-mcp-agents 0.1.17__py3-none-any.whl → 0.1.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +3 -1
- universal_mcp/agents/base.py +3 -0
- universal_mcp/agents/cli.py +0 -3
- universal_mcp/agents/codeact0/__main__.py +0 -6
- universal_mcp/agents/codeact0/llm_tool.py +1 -103
- universal_mcp/agents/codeact0/playbook_agent.py +26 -30
- universal_mcp/agents/codeact0/prompts.py +35 -96
- universal_mcp/agents/codeact0/tools.py +34 -22
- universal_mcp/agents/codeact0/utils.py +1 -1
- universal_mcp/agents/sandbox.py +90 -0
- universal_mcp/applications/filesystem/__init__.py +0 -0
- universal_mcp/applications/filesystem/app.py +160 -0
- universal_mcp/applications/llm/__init__.py +2 -2
- universal_mcp/applications/llm/app.py +165 -23
- {universal_mcp_agents-0.1.17.dist-info → universal_mcp_agents-0.1.19.dist-info}/METADATA +1 -1
- {universal_mcp_agents-0.1.17.dist-info → universal_mcp_agents-0.1.19.dist-info}/RECORD +17 -14
- {universal_mcp_agents-0.1.17.dist-info → universal_mcp_agents-0.1.19.dist-info}/WHEEL +0 -0
universal_mcp/agents/__init__.py
CHANGED
@@ -9,7 +9,9 @@ from universal_mcp.agents.react import ReactAgent
 from universal_mcp.agents.simple import SimpleAgent
 
 
-def get_agent(
+def get_agent(
+    agent_name: Literal["react", "simple", "builder", "bigtool", "codeact-script", "codeact-repl"],
+):
     if agent_name == "react":
         return ReactAgent
     elif agent_name == "simple":
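Note: the new signature pins agent_name to a closed set of literals, so an unsupported name is caught by a type checker instead of falling through at runtime. A minimal usage sketch (the constructor arguments mirror the params dict in cli.py below; anything beyond that is an assumption):

    # Hypothetical usage of the new get_agent() signature.
    from universal_mcp.agents import get_agent

    agent_cls = get_agent("react")  # any other listed literal also type-checks
    # Constructor arguments follow cli.py below; a real agent likely also
    # needs a registry and memory.
    agent = agent_cls(name="react", model="anthropic/claude-sonnet-4-20250514")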
universal_mcp/agents/base.py
CHANGED
@@ -49,8 +49,11 @@ class BaseAgent:
         run_metadata.update(metadata)
 
         run_config = {
+            "recursion_limit": 25,
             "configurable": {"thread_id": thread_id},
             "metadata": run_metadata,
+            "run_id": thread_id,
+            "run_name": self.name,
         }
 
         async for event, meta in self._graph.astream(
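Note: the three added keys follow LangGraph's run-config conventions: recursion_limit caps the number of graph super-steps per invocation, while run_id and run_name label the run for tracing backends. A sketch of how such a config reaches a compiled graph (`graph` and `inputs` are hypothetical placeholders, not from this package):

    # Sketch only, assuming a compiled LangGraph graph named `graph`.
    import asyncio

    run_config = {
        "recursion_limit": 25,              # stop runaway loops after 25 steps
        "configurable": {"thread_id": "thread-1"},
        "metadata": {"source": "demo"},
        "run_id": "thread-1",               # the diff reuses the thread id
        "run_name": "react",
    }

    async def main():
        async for event, meta in graph.astream(inputs, config=run_config):
            print(meta)

    asyncio.run(main())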
universal_mcp/agents/cli.py
CHANGED
@@ -28,9 +28,6 @@ def run(name: str = "react"):
         "model": "anthropic/claude-sonnet-4-20250514",
         "registry": AgentrRegistry(client=client),
         "memory": MemorySaver(),
-        "tools": {
-            "google_mail": ["send_email"],
-        },
     }
     agent_cls = get_agent(name)
     agent = agent_cls(name=name, **params)
universal_mcp/agents/codeact0/__main__.py
CHANGED

@@ -19,12 +19,6 @@ async def main():
         memory=memory,
     )
     print("Starting agent...")
-    # await agent.ainit()
-    # await agent.run_interactive()
-    # async for event in agent.stream(
-    #     user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
-    # ):
-    #     print(event.content, end="")
     result = await agent.invoke(
         user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
     )
universal_mcp/agents/codeact0/llm_tool.py
CHANGED

@@ -27,7 +27,7 @@ def smart_print(data: Any) -> None:
     Args:
         data: Either a dictionary with string keys, or a list of such dictionaries
     """
-    print(light_copy(data))  # noqa
+    print(light_copy(data))  # noqa: T201
 
 
 def creative_writer(

@@ -275,105 +275,3 @@ def data_extractor(
         .invoke(prompt)
     )
     return cast(dict[str, Any], response)
-
-
-# news_articles_schema = {
-#     "type": "object",
-#     "properties": {
-#         "articles": {
-#             "type": "array",
-#             "title": "Articles",
-#             "description": "List of news articles",
-#             "items": {
-#                 "type": "object",
-#                 "properties": {
-#                     "headline": {
-#                         "type": "string",
-#                         "title": "Headline",
-#                         "description": "The headline of the news article"
-#                     },
-#                     "url": {
-#                         "type": "string",
-#                         "title": "URL",
-#                         "description": "The URL of the news article"
-#                     }
-#                 },
-#                 "required": ["headline", "url"],
-#             }
-#         }
-#     },
-#     "required": ["articles"],
-# }
-
-
-# news_articles_schema = {
-#     "title": "NewsArticleList",
-#     "description": "A list of news articles with headlines and URLs",
-#     "type": "object",
-#     "properties": {
-#         "articles": {
-#             "type": "array",
-#             "items": {
-#                 "type": "object",
-#                 "properties": {
-#                     "headline": {
-#                         "type": "string"
-#                     },
-#                     "url": {
-#                         "type": "string"
-#                     }
-#                 },
-#                 "required": ["headline", "url"]
-#             }
-#         }
-#     },
-#     "required": ["articles"]
-# }
-# model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
-# structured_model = model.with_structured_output(news_articles_schema)
-
-
-# class TwitterComment(BaseModel):
-#     skip: bool
-#     reason: str
-#     comment: str
-
-# twitter_comment_schema = {
-#     "title": "TwitterComment",
-#     "description": "A twitter comment to engage with followers",
-#     "type": "object",
-#     "properties": {
-#         "skip": {
-#             "type": "boolean"
-#         },
-#         "reason": {
-#             "type": "string"
-#         },
-#         "comment": {
-#             "type": "string"
-#         },
-#         "tagged_profiles": {
-#             "type": "array",
-#             "items": {
-#                 "type": "string"
-#             }
-#         }
-#     },
-#     "required": ["skip", "reason"]
-# }
-
-# comment = {
-#     "tweet_id": "08109402",
-#     "handle": "@iamnishant",
-#     "text": "Hey really loved this tweet! Well said 💯"
-# }
-
-# comment_instructions = (
-#     "Goal is to engage with my twitter followers who have commented on my tweets."
-#     "Please generate a single line, context-aware, conversational reply for the given comment."
-#     "- Use social media language (can use hinglish)."
-#     "- Skip the reply, if the comment is too generic."
-#     "- Also tag relevant people in the reply."
-# )
-
-# my_reply = call_llm(comment_instructions, comment, twitter_comment_schema)
universal_mcp/agents/codeact0/playbook_agent.py
CHANGED

@@ -2,18 +2,16 @@ import inspect
 import json
 import re
 from collections.abc import Callable
-from dataclasses import dataclass
-from pathlib import Path
 from typing import Literal, cast
 
-from langchain_core.messages import AIMessage,
+from langchain_core.messages import AIMessage, ToolMessage
 from langchain_core.tools import StructuredTool
 from langchain_core.tools import tool as create_tool
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import START, StateGraph
 from langgraph.types import Command, RetryPolicy
 from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import
+from universal_mcp.types import ToolConfig, ToolFormat
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.codeact0.llm_tool import ai_classify, call_llm, data_extractor, smart_print

@@ -22,10 +20,14 @@ from universal_mcp.agents.codeact0.prompts import (
 )
 from universal_mcp.agents.codeact0.sandbox import eval_unsafe, execute_ipython_cell
 from universal_mcp.agents.codeact0.state import CodeActState
-from universal_mcp.agents.codeact0.tools import
+from universal_mcp.agents.codeact0.tools import (
+    create_meta_tools,
+    enter_playbook_mode,
+    get_valid_tools,
+)
 from universal_mcp.agents.codeact0.utils import inject_context, smart_truncate
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.agents.utils import filter_retry_on, get_message_text
+from universal_mcp.agents.utils import convert_tool_ids_to_dict, filter_retry_on, get_message_text
 
 PLAYBOOK_PLANNING_PROMPT = """Now, you are tasked with creating a reusable playbook from the user's previous workflow.
 

@@ -48,7 +50,6 @@ Example:
 Now create a plan based on the conversation history. Enclose it between ``` and ```. Ask the user if the plan is okay."""
 
 
-
 PLAYBOOK_CONFIRMING_PROMPT = """Now, you are tasked with confirming the playbook plan. Return True if the user is happy with the plan, False otherwise. Do not say anything else in your response. The user response will be the last message in the chain.
 """
 

@@ -80,7 +81,7 @@ class CodeActPlaybookAgent(BaseAgent):
             memory=memory,
             **kwargs,
         )
-        self.model_instance = load_chat_model(model
+        self.model_instance = load_chat_model(model)
         self.tools_config = tools or []
         self.registry = registry
         self.playbook_registry = playbook_registry

@@ -92,20 +93,19 @@ class CodeActPlaybookAgent(BaseAgent):
         meta_tools = create_meta_tools(self.registry)
         additional_tools = [smart_print, data_extractor, ai_classify, call_llm, meta_tools["web_search"]]
         self.additional_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in additional_tools]
+
         async def call_model(state: CodeActState) -> Command[Literal["sandbox", "execute_tools"]]:
             self.exported_tools = []
             if self.tools_config:
                 # Convert dict format to list format if needed
                 if isinstance(self.tools_config, dict):
                     self.tools_config = [
-                        f"{provider}__{tool}"
-                        for provider, tools in self.tools_config.items()
-                        for tool in tools
+                        f"{provider}__{tool}" for provider, tools in self.tools_config.items() for tool in tools
                     ]
                 if not self.registry:
                     raise ValueError("Tools are configured but no registry is provided")
                 # Langchain tools are fine
-                self.tools_config.extend(state.get(
+                self.tools_config.extend(state.get("selected_tool_ids", []))
                 self.exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
                 self.final_instructions, self.tools_context = create_default_prompt(
                     self.exported_tools, self.additional_tools, self.instructions

@@ -167,7 +167,7 @@ class CodeActPlaybookAgent(BaseAgent):
                 )
                 return Command(
                     goto="playbook",
-                    update={"playbook_mode": "planning", "messages": [tool_message]},
+                    update={"playbook_mode": "planning", "messages": [tool_message]},  # Entered Playbook mode
                 )
             elif tool_call["name"] == "execute_ipython_cell":
                 return Command(goto="sandbox")

@@ -184,8 +184,13 @@ class CodeActPlaybookAgent(BaseAgent):
                     ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {links} "
                 elif tool_call["name"] == "search_functions":
                     tool_result = await meta_tools["search_functions"].ainvoke(tool_call["args"])
+                else:
+                    raise Exception(
+                        f"Unexpected tool call: {tool_call['name']}. "
+                        "tool calls must be one of 'enter_playbook_mode', 'execute_ipython_cell', 'load_functions', or 'search_functions'"
+                    )
             except Exception as e:
-                tool_result =
+                tool_result = str(e)
 
             tool_message = ToolMessage(
                 content=json.dumps(tool_result),

@@ -261,13 +266,12 @@ class CodeActPlaybookAgent(BaseAgent):
             response = cast(AIMessage, response)
             response_text = get_message_text(response)
             # Extract plan from response text between triple backticks
-            plan_match = re.search(r
+            plan_match = re.search(r"```(.*?)```", response_text, re.DOTALL)
             if plan_match:
                 plan = plan_match.group(1).strip()
             else:
                 plan = response_text.strip()
             return Command(update={"messages": [response], "playbook_mode": "confirming", "plan": plan})
-
 
         elif playbook_mode == "confirming":
             confirmation_instructions = self.instructions + PLAYBOOK_CONFIRMING_PROMPT

@@ -279,8 +283,6 @@ class CodeActPlaybookAgent(BaseAgent):
             else:
                 return Command(goto="playbook", update={"playbook_mode": "planning"})
 
-
-
         elif playbook_mode == "generating":
             generating_instructions = self.instructions + PLAYBOOK_GENERATING_PROMPT
             messages = [{"role": "system", "content": generating_instructions}] + state["messages"]

@@ -327,25 +329,19 @@ class CodeActPlaybookAgent(BaseAgent):
                 saved_note = f"Failed to save generated playbook as Agent '{function_name}': {e}"
 
             # Mock tool call for exit_playbook_mode (for testing/demonstration)
-            mock_exit_tool_call = {
-
-                "args": {},
-                "id": "mock_exit_playbook_123"
-            }
-            mock_assistant_message = AIMessage(
-                content=saved_note,
-                tool_calls=[mock_exit_tool_call]
-            )
+            mock_exit_tool_call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
+            mock_assistant_message = AIMessage(content=saved_note, tool_calls=[mock_exit_tool_call])
 
-
             # Mock tool response for exit_playbook_mode
             mock_exit_tool_response = ToolMessage(
                 content=json.dumps(f"Exited Playbook Mode.{saved_note}"),
                 name="exit_playbook_mode",
-                tool_call_id="mock_exit_playbook_123"
+                tool_call_id="mock_exit_playbook_123",
             )
 
-            return Command(
+            return Command(
+                update={"messages": [mock_assistant_message, mock_exit_tool_response], "playbook_mode": "normal"}
+            )
 
         def route_entry(state: CodeActState) -> Literal["call_model", "playbook"]:
             """Route to either normal mode or playbook creation"""
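Note: the mock exit above leans on a general LangChain invariant: an AIMessage that carries tool_calls must be followed by a ToolMessage whose tool_call_id matches, or most chat models reject the transcript. A self-contained sketch of that pairing (message text is illustrative, not from the package):

    import json
    from langchain_core.messages import AIMessage, ToolMessage

    call = {"name": "exit_playbook_mode", "args": {}, "id": "mock_exit_playbook_123"}
    assistant = AIMessage(content="Saved playbook.", tool_calls=[call])
    response = ToolMessage(
        content=json.dumps("Exited Playbook Mode."),
        name="exit_playbook_mode",
        tool_call_id=call["id"],  # must match the id on the AIMessage
    )
    history = [assistant, response]  # a valid assistant/tool message pair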
universal_mcp/agents/codeact0/prompts.py
CHANGED

@@ -9,102 +9,41 @@ from universal_mcp.agents.codeact0.utils import schema_to_signature
 uneditable_prompt = """
 You are **Wingmen**, an AI Assistant created by AgentR — a creative, straight-forward, and direct principal software engineer with access to tools.
 
-… (old prompt lines 12-46 not preserved in this diff view) …
-   Any searching, loading, or executing must be done using a tool/function call. Never answer manually if a tool/function is appropriate.
-
-2. **Use Existing Functions First**
-   Use existing functions if available. Otherwise, use `search_functions` with a concise query describing the task.
-
-3. **Load Only Relevant Tools**
-   When calling `load_functions`, include only relevant function IDs.
-   - Prefer connected applications over unconnected ones.
-   - If multiple functions match (i.e. if none are connected, or multiple are connected), ask the user to choose.
-   - After loading a tool, you do not need to import/declare it again. It can be called directly in further cells.
-
-4. **Follow First Turn Process Strictly**
-   On the **first turn**, do only **one** of the following:
-   - Handle directly (if trivial)
-   - Use a tool/function (`execute_ipython_cell`, `search_functions`, etc.)
-
-   **Do not extend the conversation on the first message.**
-
-## Coding Rules
-
-- Use `smart_print()` to inspect unknown structures, especially those received from function outputs, before looping or branching.
-- Validate logic with a single item before processing lists or large inputs.
-- Try to achieve as much as possible in a single code block.
-- Use only pre-installed Python libraries. Do import them once before using.
-- Outer level functions, variables, classes, and imports declared previously can be used in later cells.
-- For all functions, call using keyword arguments only. DO NOT use any positional arguments.
-
-### **Async Function Usage — Critical**
-
-When calling asynchronous functions:
-- You must define or use an **inner async function**.
-- Use `await` only **inside** that async function.
-- Run it using `asyncio.run(<function_name>())` **without** `await` at the outer level.
-
-**Wrong - Using `await` outside an async function**
-```
-result = await some_async_function()
-```
-**Wrong - Attaching await before asyncio.run**.
-`await asyncio.run(main())`
-These will raise SyntaxError: 'await' outside async function
-The correct method is the following-
-```
-import asyncio
-async def some_async_function():
-    ...
-
-async def main():
-    result = await some_async_function()
-    print(result)
-
-asyncio.run(main())
-#or
-result = asyncio.run(some_async_function(arg1 = <arg1>))
-```
-## Output Formatting
-- All code results must be returned in **Markdown**.
-- The user cannot see raw output, so format results clearly:
-  - Use tables for structured data.
-  - Provide links for files or images.
-  - Be explicit in formatting to ensure readability.
+Your job is to answer the user's question or perform the task they ask for.
+- Answer simple questions (which do not require you to write any code or access any external resources) directly. Note that any operation that involves using ONLY print functions should be answered directly.
+- For tasks requiring operations or access to external resources, you should achieve the task by executing Python code snippets.
+- You have access to the `execute_ipython_cell` tool that allows you to execute Python code in an IPython notebook cell.
+- You also have access to two tools for finding and loading more Python functions - `search_functions` and `load_functions` - which you must use to find functions for using different external applications.
+- Prefer pre-loaded functions or functions already available when possible.
+- Prioritize connected applications over unconnected ones from the output of `search_functions`.
+- When multiple apps are connected, or none of the apps are connected, ask the user to choose the application(s).
+- In writing or natural language processing tasks DO NOT answer directly. Instead use the `execute_ipython_cell` tool with the AI functions provided to you for tasks like summarizing, text generation, classification, data extraction from text or unstructured data, etc. Avoid hardcoded approaches to classification and data extraction.
+- The code you write will be executed in a sandbox environment, and you can use the output of previous executions in your code. Variables, functions, and imports are retained.
+- Read and understand the output of the previous code snippet and use it to answer the user's request. Note that the code output is NOT visible to the user, so after the task is complete, you have to give the output to the user in markdown format.
+- If needed, feel free to ask for more information from the user (without using the `execute_ipython_cell` tool) to clarify the task.
+
+GUIDELINES for writing code:
+- Variables defined at the top level of previous code snippets can be referenced in your code.
+- External functions which return a dict or list[dict] are ambiguous. Therefore, you MUST explore the structure of the returned data using `smart_print()` statements before using it, printing keys and values. `smart_print` truncates long strings from data, preventing huge output logs.
+- When an operation involves running a fixed set of steps on a list of items, run one item correctly first, then use a for loop to run the steps on each item in the list.
+- In a single code snippet, try to achieve as much as possible.
+- You can only import libraries that come pre-installed with Python. For external functions, use the search and load tools to access them in the code.
+- For displaying final results to the user, you must present your output in markdown format, including image links, so that they are rendered and displayed to the user. The code output is NOT visible to the user.
+- Call all functions using keyword arguments only, never positional arguments.
+- Async Functions (Critical): Use them only as follows:
+  Case 1: Top-level await without asyncio.run()
+  Wrap in an async function and call with asyncio.run():
+      async def main():
+          result = await some_async_function()
+          return result
+      asyncio.run(main())
+  Case 2: Using asyncio.run() directly
+  If code already contains asyncio.run(), use as-is - do not wrap again:
+      asyncio.run(some_async_function())
+  Rules:
+  - Never use await outside an async function
+  - Never use await asyncio.run()
+  - Never nest asyncio.run() calls
 """
 
 
universal_mcp/agents/codeact0/tools.py
CHANGED

@@ -6,18 +6,19 @@ from langchain_core.tools import tool
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat
 
-MAX_LENGHT=100
+MAX_LENGHT = 100
+
 
 
 def enter_playbook_mode():
     """Call this function to enter playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
     return
 
+
 def exit_playbook_mode():
     """Call this function to exit playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
     return
 
 
-
 def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
     """Create the meta tools for searching and loading tools"""
 

@@ -46,7 +47,7 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
         for tool in tools_list:
             app = tool["id"].split("__")[0]
             tool_id = tool["id"]
-
+
             # Check if within limit and add to set (automatically deduplicates)
             if len(app_tools[app]) < MAX_LENGTH:
                 cleaned_desc = tool["description"].split("Context:")[0].strip()

@@ -54,6 +55,9 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
 
         # Build result string efficiently
         result_parts = []
+        apps_in_results = set(app_tools.keys())
+        connected_apps_in_results = apps_in_results.intersection(connected_apps)
+
         for app, tools in app_tools.items():
             app_status = "connected" if app in connected_apps else "NOT connected"
             result_parts.append(f"Tools from {app} (status: {app_status} by user):")

@@ -62,6 +66,13 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
                 result_parts.append(f"  - {tool}")
             result_parts.append("")  # Empty line between apps
 
+        # Add connection status information
+        if len(connected_apps_in_results) == 0 and len(apps_in_results) > 0:
+            result_parts.append("Connection Status: None of the apps in the results are connected. You must ask the user to choose the application.")
+        elif len(connected_apps_in_results) > 1:
+            connected_list = ", ".join(connected_apps_in_results)
+            result_parts.append(f"Connection Status: Multiple apps are connected ({connected_list}). You must ask the user to select which application they want to use.")
+
         result_parts.append("Call load_functions to select the required functions only.")
         return "\n".join(result_parts)
 

@@ -103,29 +114,30 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
         return f"Successfully loaded {len(tool_ids)} functions: {tool_ids}"
 
     @tool
-    async def web_search(query: str) ->
-        """
+    async def web_search(query: str) -> dict:
+        """
+        Get an LLM answer to a question informed by Exa search results.
 
+        This tool performs an Exa `/answer` request, which:
+        1. Provides a **direct answer** for factual queries (e.g., "What is the capital of France?" → "Paris")
+        2. Generates a **summary with citations** for open-ended questions
+           (e.g., "What is the state of AI in healthcare?" → A detailed summary with source links)
 
+        Args:
+            query (str): The question or topic to answer.
         Returns:
-
-            -
-            -
-            - url (str): The web URL of the result
-            - publishedDate (str): ISO 8601 formatted date (e.g., "2025-01-01T00:00:00.000Z")
-            - author (str): Author name (may be empty string)
-            - summary (str): Text summary/snippet of the content
-            - image (str): URL to associated image (if available)
-
-        Example:
-            results = await web_search(query="python programming")
+            dict: A structured response containing only:
+            - answer (str): Generated answer
+            - citations (list[dict]): List of cited sources
         """
-        await tool_registry.export_tools(["
-        response = await tool_registry.call_tool(
-
-
-        return
+        await tool_registry.export_tools(["exa__answer"], ToolFormat.LANGCHAIN)
+        response = await tool_registry.call_tool("exa__answer", {"query": query, "text": True})
+
+        # Extract only desired fields
+        return {
+            "answer": response.get("answer"),
+            "citations": response.get("citations", []),
+        }
 
     return {"search_functions": search_functions, "load_functions": load_functions, "web_search": web_search}
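Note: because web_search is declared with @tool and is async, it is invoked through ainvoke like the other meta tools in this module. A hedged usage sketch (assumes a ToolRegistry with the exa app available; `registry` is a placeholder):

    import asyncio
    from universal_mcp.agents.codeact0.tools import create_meta_tools

    async def main():
        meta = create_meta_tools(registry)  # `registry` assumed to exist
        result = await meta["web_search"].ainvoke({"query": "state of AI in healthcare"})
        print(result["answer"])
        for citation in result["citations"]:
            print(citation)

    asyncio.run(main())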
universal_mcp/agents/sandbox.py
ADDED

@@ -0,0 +1,90 @@
+import contextlib
+import inspect
+import io
+import queue
+import re
+import socket
+import threading
+import types
+from typing import Any
+
+from universal_mcp.agents.codeact0.utils import derive_context
+
+
+class Sandbox:
+    """
+    A class to execute code safely in a sandboxed environment with a timeout.
+    """
+
+    def __init__(self, timeout: int = 180):
+        """
+        Initializes the Sandbox.
+        Args:
+            timeout: The timeout for code execution in seconds.
+        """
+        self.timeout = timeout
+        self._locals: dict[str, Any] = {}
+        self.add_context: dict[str, Any] = {}
+
+    def run(self, code: str) -> tuple[str, dict[str, Any], dict[str, Any]]:
+        """
+        Execute code safely with a timeout.
+        - Returns (output_str, filtered_locals_dict, new_add_context)
+        - Errors or timeout are returned as output_str.
+        - Previous variables in _locals persist across calls.
+        """
+
+        EXCLUDE_TYPES = (
+            types.ModuleType,
+            type(re.match("", "")),
+            type(threading.Lock()),
+            type(threading.RLock()),
+            threading.Event,
+            threading.Condition,
+            threading.Semaphore,
+            queue.Queue,
+            socket.socket,
+            io.IOBase,
+        )
+
+        result_container = {"output": "<no output>"}
+
+        def target():
+            try:
+                with contextlib.redirect_stdout(io.StringIO()) as f:
+                    exec(code, self._locals, self._locals)
+                result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
+            except Exception as e:
+                result_container["output"] = "Error during execution: " + str(e)
+
+        thread = threading.Thread(target=target)
+        thread.start()
+        thread.join(self.timeout)
+
+        if thread.is_alive():
+            result_container["output"] = f"Code timeout: code execution exceeded {self.timeout} seconds."
+
+        # Filter locals for picklable/storable variables
+        all_vars = {}
+        for key, value in self._locals.items():
+            if key == "__builtins__":
+                continue
+            if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
+                continue
+            if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
+                continue
+            if isinstance(value, EXCLUDE_TYPES):
+                continue
+            if not callable(value) or not hasattr(value, "__name__"):
+                all_vars[key] = value
+
+        self._locals = all_vars
+
+        # Safely derive context
+        try:
+            self.add_context = derive_context(code, self.add_context)
+        except Exception:
+            # Keep the old context if derivation fails
+            pass
+
+        return result_container["output"], self._locals, self.add_context
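Note: since run() re-seeds _locals from the filtered snapshot, plain top-level variables survive between calls while modules, sockets, locks, and coroutines are dropped. A quick sketch of that persistence, using only the class above:

    sandbox = Sandbox(timeout=10)

    output, local_vars, context = sandbox.run("x = 21\nprint(x)")
    print(output)              # "21"

    output, local_vars, context = sandbox.run("print(x * 2)")
    print(output)              # "42" - x persisted from the first call
    print("x" in local_vars)   # True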
universal_mcp/applications/filesystem/__init__.py
File without changes
universal_mcp/applications/filesystem/app.py
ADDED

@@ -0,0 +1,160 @@
+import fnmatch
+import os
+import pathlib
+import re
+import uuid
+
+from loguru import logger
+from universal_mcp.applications.application import BaseApplication
+
+
+class FileSystemApp(BaseApplication):
+    """
+    A class to safely interact with the filesystem within a specified working directory.
+    """
+
+    def __init__(self, working_dir: str | None = None, **kwargs):
+        """
+        Initializes the FileSystemApp with a working directory.
+
+        Args:
+            working_dir: The absolute path to the directory where all operations will be performed.
+        """
+        super().__init__(name="Filesystem")
+
+        self.set_working_dir(working_dir or f"/tmp/{uuid.uuid4()}")
+
+    def set_working_dir(self, working_dir: str):
+        self.working_dir = pathlib.Path(working_dir).absolute()
+        # Create dir if not exists
+        self.working_dir.mkdir(parents=True, exist_ok=True)
+
+    def _is_safe_path(self, path: str) -> bool:
+        """
+        Checks if the given path is within the working directory.
+
+        Args:
+            path: The path to check.
+
+        Returns:
+            True if the path is safe, False otherwise.
+        """
+        common_path = os.path.commonpath([self.working_dir, path])
+        return common_path == str(self.working_dir)
+
+    def create_file(self, path: str, content: str = "") -> None:
+        """
+        Creates a file with the given content.
+
+        Args:
+            path: The relative path to the file to create.
+            content: The content to write to the file.
+
+        Raises:
+            ValueError: If the path is outside the working directory.
+        """
+        if not self._is_safe_path(path):
+            error = f"Path is outside the working directory: {path} vs {self.working_dir}"
+            logger.error(error)
+            raise ValueError(error)
+
+        full_path = os.path.join(self.working_dir, path)
+        os.makedirs(os.path.dirname(full_path), exist_ok=True)
+        with open(full_path, "w") as f:
+            f.write(content)
+
+    def read_file(self, path: str) -> str:
+        """
+        Reads the content of a file.
+
+        Args:
+            path: The relative path to the file to read.
+
+        Returns:
+            The content of the file.
+
+        Raises:
+            ValueError: If the path is outside the working directory.
+            FileNotFoundError: If the file does not exist.
+        """
+        if not self._is_safe_path(path):
+            raise ValueError("Path is outside the working directory.")
+
+        full_path = os.path.join(self.working_dir, path)
+        if not os.path.exists(full_path):
+            raise FileNotFoundError(f"File not found: {full_path}")
+
+        with open(full_path) as f:
+            return f.read()
+
+    def list_files(self, path: str = ".", recursive: bool = False) -> list[str]:
+        """
+        Lists files in a directory.
+
+        Args:
+            path: The relative path to the directory to list.
+            recursive: Whether to list files recursively.
+
+        Returns:
+            A list of file paths.
+
+        Raises:
+            ValueError: If the path is outside the working directory.
+        """
+        if not self._is_safe_path(path):
+            raise ValueError("Path is outside the working directory.")
+
+        full_path = os.path.join(self.working_dir, path)
+        if not os.path.isdir(full_path):
+            raise ValueError(f"Path '{path}' is not a directory.")
+
+        files = []
+        if recursive:
+            for root, _, filenames in os.walk(full_path):
+                for filename in filenames:
+                    files.append(os.path.relpath(os.path.join(root, filename), self.working_dir))
+        else:
+            for item in os.listdir(full_path):
+                item_path = os.path.join(full_path, item)
+                if os.path.isfile(item_path):
+                    files.append(os.path.relpath(item_path, self.working_dir))
+        return files
+
+    def grep(self, pattern: str, path: str = ".", file_pattern: str = "*") -> list[str]:
+        """
+        Searches for a pattern in files.
+
+        Args:
+            pattern: The regex pattern to search for.
+            path: The relative path to the directory to search in.
+            file_pattern: A glob pattern to filter files to search.
+
+        Returns:
+            A list of strings with "file:line_number:line" for each match.
+
+        Raises:
+            ValueError: If the path is outside the working directory.
+        """
+        if not self._is_safe_path(path):
+            raise ValueError("Path is outside the working directory.")
+
+        full_path = os.path.join(self.working_dir, path)
+        if not os.path.isdir(full_path):
+            raise ValueError(f"Path '{path}' is not a directory.")
+
+        matches = []
+        for root, _, filenames in os.walk(full_path):
+            for filename in fnmatch.filter(filenames, file_pattern):
+                file_path = os.path.join(root, filename)
+                try:
+                    with open(file_path, errors="ignore") as f:
+                        for i, line in enumerate(f, 1):
+                            if re.search(pattern, line):
+                                relative_path = os.path.relpath(file_path, self.working_dir)
+                                matches.append(f"{relative_path}:{i}:{line.strip()}")
+                except OSError:
+                    continue  # Skip files that can't be opened
+        return matches
+
+    def list_tools(self):
+        return [self.create_file, self.grep, self.list_files, self.read_file]
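Note: one caveat worth flagging: the docstrings describe relative paths, but _is_safe_path feeds the raw path into os.path.commonpath together with the absolute working_dir, and commonpath raises ValueError when absolute and relative paths are mixed. Absolute paths under the working directory do pass the check, so this hedged sketch uses them:

    from universal_mcp.applications.filesystem.app import FileSystemApp

    fs = FileSystemApp(working_dir="/tmp/fs-demo")
    path = "/tmp/fs-demo/notes/todo.txt"   # absolute, inside working_dir

    fs.create_file(path, content="ship 0.1.19\n")
    print(fs.read_file(path))                               # "ship 0.1.19"
    print(fs.list_files("/tmp/fs-demo", recursive=True))    # ["notes/todo.txt"]
    print(fs.grep(r"0\.1\.\d+", "/tmp/fs-demo", "*.txt"))   # ["notes/todo.txt:1:ship 0.1.19"]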
universal_mcp/applications/llm/__init__.py
CHANGED

@@ -1,3 +1,3 @@
-from .app import
+from .app import LlmApp
 
-__all__ = ["
+__all__ = ["LlmApp"]
universal_mcp/applications/llm/app.py
CHANGED

@@ -2,10 +2,11 @@ import json
 from typing import Any, Literal, cast
 
 from langchain.chat_models import init_chat_model
-from langchain_openai import AzureChatOpenAI
 from pydantic import BaseModel, Field
 from universal_mcp.applications.application import BaseApplication
 
+from universal_mcp.agents.llm import load_chat_model
+
 MAX_RETRIES = 3
 
 

@@ -28,7 +29,7 @@ def _get_context_as_string(source: Any | list[Any] | dict[str, Any]) -> str:
     return "\n".join(f"<{k}>\n{str(v)}\n</{k}>" for k, v in source.items())
 
 
-class
+class LlmApp(BaseApplication):
     """
     An application for leveraging Large Language Models (LLMs) for advanced text processing tasks.
     """

@@ -40,22 +41,34 @@ class LLMApp(BaseApplication):
     def generate_text(
         self,
         task: str,
-        context:
+        context: str | list[str] | dict[str, str] = "",
         tone: str = "normal",
         output_format: Literal["markdown", "html", "plain"] = "markdown",
         length: Literal["very-short", "concise", "normal", "long"] = "concise",
     ) -> str:
         """
-
-
-
+        Given a high-level writing task and context, returns a well-written text
+        that achieves the task, given the context.
+
+        Example Call:
+            generate_text("Summarize this website with the goal of making it easy to understand.", web_content)
+            generate_text("Make a markdown table summarizing the key differences between doc_1 and doc_2.", {"doc_1": str(doc_1), "doc_2": str(doc_2)})
+            generate_text("Summarize all the provided documents.", [doc_1, doc_2, doc_3])
+
+        Important:
+            - Include specifics of the goal in the context verbatim.
+            - Be precise and direct in the task, and include as much context as possible.
+            - Include relevant high-level goals or intent in the task.
+            - You can provide multiple documents as input, and reference them in the task.
+            - You MUST provide the contents of any source documents to `generate_text`.
+            - NEVER use `generate_text` to produce JSON for a Pydantic model.
 
         Args:
             task: The main writing task or directive.
-            context: A single string, list of strings, or
-            tone: The desired tone of the output (e.g., "formal", "casual", "technical").
+            context: A single string, list of strings, or dict mapping labels to content.
+            tone: The desired tone of the output (e.g., "normal", "flirty", "formal", "casual", "crisp", "poetic", "technical", "internet-chat", "smartass", etc.).
             output_format: The desired output format ('markdown', 'html', 'plain').
-            length:
+            length: Desired length of the output ('very-short', 'concise', 'normal', 'long').
 
         Returns:
             The generated text as a string.

@@ -78,31 +91,63 @@ class LLMApp(BaseApplication):
 
         full_prompt = f"{prompt}\n\nContext:\n{context_str}\n\n"
 
-        model =
+        model = load_chat_model("azure/gpt-5-mini")
         response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(full_prompt)
         return str(response.content)
 
     def classify_data(
         self,
-
+        classification_task_and_requirements: str,
         context: Any | list[Any] | dict[str, Any],
         class_descriptions: dict[str, str],
     ) -> dict[str, Any]:
         """
-        Classifies
+        Classifies and compares data based on given requirements.
+
+        Use `classify_data` for tasks which need to classify data into one of many categories.
+        If making multiple binary classifications, call `classify_data` for each.
+
+        Guidance:
+        - Prefer to use classify_data operations to compare strings, rather than string ops.
+        - Prefer to include an "Unsure" category for classification tasks.
+        - The `class_descriptions` dict argument MUST be a map from possible class names to a precise description.
+        - Use precise and specific class names and concise descriptions.
+        - Pass ALL relevant context, preferably as a dict mapping labels to content.
+        - Returned dict maps each possible class name to a probability.
+
+        Example Usage:
+            classification_task_and_requirements = "Does the document contain an address?"
+            class_descriptions = {
+                "Is_Address": "Valid addresses usually have street names, city, and zip codes.",
+                "Not_Address": "Not valid addresses."
+            }
+            classification = classify_data(
+                classification_task_and_requirements,
+                {"address": extracted_address},
+                class_descriptions
+            )
+            if classification["probabilities"]["Is_Address"] > 0.5:
+                ...
 
         Args:
-
-            context: The data to
-            class_descriptions:
+            classification_task_and_requirements: The classification question and rules.
+            context: The data to classify (string, list, or dict).
+            class_descriptions: Mapping from class names to descriptions.
+
+        Tags:
+            important
 
         Returns:
-
+            dict: {
+                "probabilities": dict[str, float],
+                "reason": str,
+                "top_class": str,
+            }
         """
         context_str = _get_context_as_string(context)
 
         prompt = (
-            f"{
+            f"{classification_task_and_requirements}\n\n"
             f"This is a classification task.\nPossible classes and descriptions:\n"
             f"{json.dumps(class_descriptions, indent=2)}\n\n"
             f"Context:\n{context_str}\n\n"

@@ -125,25 +170,61 @@ class LLMApp(BaseApplication):
 
     def extract_data(
         self,
-
+        extraction_task: str,
         source: Any | list[Any] | dict[str, Any],
         output_schema: dict[str, Any],
     ) -> dict[str, Any]:
         """
-        Extracts structured data from unstructured
+        Extracts structured data from unstructured data (documents, webpages, images, large bodies of text),
+        returning a dictionary matching the given output_schema.
+
+        You MUST anticipate Exception raised for unextractable data; skip this item if applicable.
+
+        Strongly prefer to:
+        - Be comprehensive, specific, and precise on the data you want to extract.
+        - Use optional fields everywhere.
+        - Extract multiple items from each source unless otherwise specified.
+        - The more specific your extraction task and output_schema are, the better the results.
 
         Args:
-
-            source: The unstructured data to extract from
-            output_schema:
+            extraction_task: The directive describing what to extract.
+            source: The unstructured data to extract from.
+            output_schema: must be a valid JSON schema with top-level 'title' and 'description' keys.
 
         Returns:
             A dictionary containing the extracted data, matching the provided schema.
+
+        Example:
+            news_articles_schema = {
+                "title": "NewsArticleList",
+                "description": "A list of news articles with headlines and URLs",
+                "type": "object",
+                "properties": {
+                    "articles": {
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "headline": {
+                                    "type": "string"
+                                },
+                                "url": {
+                                    "type": "string"
+                                }
+                            },
+                            "required": ["headline", "url"]
+                        }
+                    }
+                },
+                "required": ["articles"]
+            }
+
+            news_articles = extract_data("Extract headlines and their corresponding URLs.", content, news_articles_schema)
         """
         context_str = _get_context_as_string(source)
 
         prompt = (
-            f"{
+            f"{extraction_task}\n\n"
             f"Context:\n{context_str}\n\n"
             "Return ONLY a valid JSON object that conforms to the provided schema, with no extra text."
         )

@@ -156,3 +237,64 @@ class LLMApp(BaseApplication):
             .invoke(prompt)
         )
         return cast(dict[str, Any], response)
+
+    def call_llm(
+        self,
+        task_instructions: str,
+        context: Any | list[Any] | dict[str, Any],
+        output_schema: dict[str, Any],
+    ) -> dict[str, Any]:
+        """
+        Call a Large Language Model (LLM) with an instruction and contextual information,
+        returning a dictionary matching the given output_schema.
+        Can be used for tasks like creative writing, llm reasoning based content generation, etc.
+
+        You MUST anticipate Exceptions in reasoning based tasks which will lead to some empty fields
+        in the returned output; skip this item if applicable.
+
+        General Guidelines:
+        - Be comprehensive, specific, and precise on the task instructions.
+        - Include as much context as possible.
+        - You can provide multiple items in context, and reference them in the task.
+        - Include relevant high-level goals or intent in the task.
+        - In the output_schema, use required field wherever necessary.
+        - The more specific your task instructions and output_schema are, the better the results.
+
+        Guidelines for content generation tasks:
+        - Feel free to add instructions for tone, length, and format (markdown, html, plain-text, xml)
+        - Some examples of tone are: "normal", "flirty", "formal", "casual", "crisp", "poetic", "technical", "internet-chat", "smartass", etc.
+        - Prefer length to be concise by default. Other examples are: "very-short", "concise", "normal", "long", "2-3 lines", etc.
+        - In format prefer plain-text but you can also use markdown and html wherever useful.
+
+        Args:
+            task_instructions: The main directive for the LLM (e.g., "Summarize the article" or "Extract key entities").
+            context:
+                A dictionary containing named text elements that provide additional
+                information for the LLM. Keys are labels (e.g., 'article', 'transcript'),
+                values are strings of content.
+            output_schema: must be a valid JSON schema with top-level 'title' and 'description' keys.
+
+        Returns:
+            dict: Parsed JSON object matching the desired output_schema.
+
+        """
+        context_str = _get_context_as_string(context)
+
+        prompt = f"{task_instructions}\n\nContext:\n{context_str}\n\nReturn ONLY a valid JSON object, no extra text."
+
+        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+
+        response = (
+            model.with_structured_output(schema=output_schema, method="json_mode")
+            .with_retry(stop_after_attempt=MAX_RETRIES)
+            .invoke(prompt)
+        )
+        return cast(dict[str, Any], response)
+
+    def list_tools(self):
+        return [
+            self.generate_text,
+            self.classify_data,
+            self.extract_data,
+            self.call_llm,
+        ]
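Note: a hedged end-to-end sketch of the renamed LlmApp, chaining extract_data into classify_data as the docstrings above suggest (the bare constructor call and the model credentials are assumptions; the schema shape follows the docstring example):

    from universal_mcp.applications.llm import LlmApp

    app = LlmApp()  # constructor signature assumed; not shown in this diff

    address_schema = {
        "title": "ExtractedAddress",
        "description": "A postal address found in free text",
        "type": "object",
        "properties": {"address": {"type": "string"}},
    }
    extracted = app.extract_data(
        "Extract the postal address, if any.",
        "Send the invoice to 221B Baker Street, London NW1 6XE.",
        address_schema,
    )

    classification = app.classify_data(
        "Does the document contain an address?",
        {"address": extracted.get("address", "")},
        {
            "Is_Address": "Valid addresses usually have street names, city, and zip codes.",
            "Not_Address": "Not valid addresses.",
            "Unsure": "Cannot tell from the given text.",
        },
    )
    if classification["probabilities"]["Is_Address"] > 0.5:
        print(extracted["address"])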
{universal_mcp_agents-0.1.17.dist-info → universal_mcp_agents-0.1.19.dist-info}/RECORD
CHANGED

@@ -1,9 +1,10 @@
-universal_mcp/agents/__init__.py,sha256=
-universal_mcp/agents/base.py,sha256=
-universal_mcp/agents/cli.py,sha256=
+universal_mcp/agents/__init__.py,sha256=NuS_JOKYeHfAkNA4xRyvxdGZP_tXKfvsmoc2fhkMrj8,1259
+universal_mcp/agents/base.py,sha256=hrlvSv0TLS-ShaKofpHD3kishciql7SSbTKCUHKGObc,7175
+universal_mcp/agents/cli.py,sha256=bXdpgxsOMjclm1STHJgx10ocX9EebQ11DrxH0p6KMZk,943
 universal_mcp/agents/hil.py,sha256=_5PCK6q0goGm8qylJq44aSp2MadP-yCPvhOJYKqWLMo,3808
 universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
 universal_mcp/agents/react.py,sha256=8XQvJ0HLVgc-K0qn9Ml48WGcgUGuIKtL67HatlT6Da0,3334
+universal_mcp/agents/sandbox.py,sha256=Int2O8JNFPlB8c7gb86KRxlNbuV0zdz5_NCo_GMcCds,2876
 universal_mcp/agents/simple.py,sha256=NSATg5TWzsRNS7V3LFiDG28WSOCIwCdcC1g7NRwg2nM,2095
 universal_mcp/agents/utils.py,sha256=P6W9k6XAOBp6tdjC2VTP4tE0B2M4-b1EDmr-ylJ47Pw,7765
 universal_mcp/agents/bigtool/__init__.py,sha256=mZG8dsaCVyKlm82otxtiTA225GIFLUCUUYPEIPF24uw,2299

@@ -28,23 +29,25 @@ universal_mcp/agents/codeact/sandbox.py,sha256=NjN6ISj8psFtHf8V0w24ChJdUMUWkq7Or
 universal_mcp/agents/codeact/state.py,sha256=WTPfpxDlGRnlr5tZuXMg_KU7GS7TZbnrIKslOvZLbQI,565
 universal_mcp/agents/codeact/utils.py,sha256=JUbT_HYGS_D1BzmzoVpORIe7SGur1KgJguTZ_1tZ4JY,1918
 universal_mcp/agents/codeact0/__init__.py,sha256=ebKkpgg-0UnsvDtagEJ2tMer1VsfhmEE5KJcFzUk9fU,133
-universal_mcp/agents/codeact0/__main__.py,sha256=
+universal_mcp/agents/codeact0/__main__.py,sha256=xeqNuawP9M8JVAnkhLesalnpI_TakC49ATJaSCzCsYs,880
 universal_mcp/agents/codeact0/agent.py,sha256=9BInAQr3csES-XHSscmeJlYJ3-wQUHPvLOf-6wFILUU,6695
 universal_mcp/agents/codeact0/config.py,sha256=H-1woj_nhSDwf15F63WYn723y4qlRefXzGxuH81uYF0,2215
 universal_mcp/agents/codeact0/langgraph_agent.py,sha256=ehjMV_Z1118pCFWB_Sa5H7XnUp0udsbUHjfjXjhIQM8,435
-universal_mcp/agents/codeact0/llm_tool.py,sha256=
-universal_mcp/agents/codeact0/playbook_agent.py,sha256=
-universal_mcp/agents/codeact0/prompts.py,sha256=
+universal_mcp/agents/codeact0/llm_tool.py,sha256=q-hiqkKtjVmpyNceFoRgo7hvKh4HtQf_I1VudRUEPR0,11075
+universal_mcp/agents/codeact0/playbook_agent.py,sha256=6ePcpEOrHxoNoaAdKdg9i7Yi6hcJOxBAo1MavCn8J6A,18081
+universal_mcp/agents/codeact0/prompts.py,sha256=2MF0J371Ib7cDXqpW6Ei_CwBpRFmK95neLh9QM4emIY,8708
 universal_mcp/agents/codeact0/sandbox.py,sha256=zMgHrWnQYkSkJb2MzfXvT3euCc4hvqzBE_EbX2_iLxA,3142
 universal_mcp/agents/codeact0/state.py,sha256=Y-Rzn_S7--aXH18KPvyhqDqOOB-miu1lsAmLgmMlaAg,1259
-universal_mcp/agents/codeact0/tools.py,sha256=
-universal_mcp/agents/codeact0/utils.py,sha256=
+universal_mcp/agents/codeact0/tools.py,sha256=qVZLq1YlVKABZdpEqFAzLo04DTFd1ZJi18atfzmxEb8,8374
+universal_mcp/agents/codeact0/utils.py,sha256=jAZItSd3KGDkY9PquSWRIFCj9N26K9Kt0HKQ_jwvvSQ,15944
 universal_mcp/agents/shared/__main__.py,sha256=XxH5qGDpgFWfq7fwQfgKULXGiUgeTp_YKfcxftuVZq8,1452
 universal_mcp/agents/shared/prompts.py,sha256=yjP3zbbuKi87qCj21qwTTicz8TqtkKgnyGSeEjMu3ho,3761
 universal_mcp/agents/shared/tool_node.py,sha256=DC9F-Ri28Pam0u3sXWNODVgmj9PtAEUb5qP1qOoGgfs,9169
-universal_mcp/applications/
-universal_mcp/applications/
+universal_mcp/applications/filesystem/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+universal_mcp/applications/filesystem/app.py,sha256=0TRjjm8YnslVRSmfkXI7qQOAlqWlD1eEn8Jm0xBeigs,5561
+universal_mcp/applications/llm/__init__.py,sha256=_XGRxN3O1--ZS5joAsPf8IlI9Qa6negsJrwJ5VJXno0,46
+universal_mcp/applications/llm/app.py,sha256=oqX3byvlFRmeRo4jJJxUBGy-iTDGm2fplMEKA2pcMtw,12743
 universal_mcp/applications/ui/app.py,sha256=c7OkZsO2fRtndgAzAQbKu-1xXRuRp9Kjgml57YD2NR4,9459
-universal_mcp_agents-0.1.
-universal_mcp_agents-0.1.
-universal_mcp_agents-0.1.
+universal_mcp_agents-0.1.19.dist-info/METADATA,sha256=imopF36ZWDrHP3tweT0GtvIlKdNg_CLa9yZrXvv3s9s,878
+universal_mcp_agents-0.1.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.19.dist-info/RECORD,,

{universal_mcp_agents-0.1.17.dist-info → universal_mcp_agents-0.1.19.dist-info}/WHEEL
File without changes