universal-mcp-agents 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +1 -1
- universal_mcp/agents/base.py +2 -0
- universal_mcp/agents/bigtool/__init__.py +1 -1
- universal_mcp/agents/bigtool/agent.py +2 -2
- universal_mcp/agents/bigtool/graph.py +65 -31
- universal_mcp/agents/bigtool/prompts.py +2 -2
- universal_mcp/agents/bigtool/tools.py +18 -4
- universal_mcp/agents/builder/__main__.py +105 -30
- universal_mcp/agents/builder/builder.py +149 -160
- universal_mcp/agents/builder/helper.py +73 -0
- universal_mcp/agents/builder/prompts.py +33 -152
- universal_mcp/agents/builder/state.py +1 -1
- universal_mcp/agents/cli.py +2 -2
- universal_mcp/agents/codeact/agent.py +1 -1
- universal_mcp/agents/codeact/sandbox.py +1 -5
- universal_mcp/agents/codeact0/agent.py +5 -4
- universal_mcp/agents/codeact0/langgraph_agent.py +17 -0
- universal_mcp/agents/codeact0/llm_tool.py +1 -1
- universal_mcp/agents/codeact0/prompts.py +34 -23
- universal_mcp/agents/codeact0/usecases/11-github.yaml +6 -5
- universal_mcp/agents/codeact0/utils.py +42 -63
- universal_mcp/agents/shared/__main__.py +43 -0
- universal_mcp/agents/shared/prompts.py +50 -99
- universal_mcp/agents/shared/tool_node.py +149 -203
- universal_mcp/agents/utils.py +65 -0
- universal_mcp/applications/ui/app.py +2 -2
- {universal_mcp_agents-0.1.12.dist-info → universal_mcp_agents-0.1.14.dist-info}/METADATA +1 -1
- {universal_mcp_agents-0.1.12.dist-info → universal_mcp_agents-0.1.14.dist-info}/RECORD +29 -28
- universal_mcp/agents/codeact0/langgraph_graph.py +0 -17
- universal_mcp/agents/codeact0/legacy_codeact.py +0 -104
- {universal_mcp_agents-0.1.12.dist-info → universal_mcp_agents-0.1.14.dist-info}/WHEEL +0 -0
universal_mcp/agents/__init__.py
CHANGED
@@ -3,8 +3,8 @@ from typing import Literal
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.bigtool import BigToolAgent
 from universal_mcp.agents.builder.builder import BuilderAgent
-from universal_mcp.agents.codeact0 import CodeActAgent as CodeActRepl
 from universal_mcp.agents.codeact import CodeActAgent as CodeActScript
+from universal_mcp.agents.codeact0 import CodeActAgent as CodeActRepl
 from universal_mcp.agents.react import ReactAgent
 from universal_mcp.agents.simple import SimpleAgent

universal_mcp/agents/base.py
CHANGED
@@ -1,9 +1,9 @@
 from universal_mcp.agentr.registry import AgentrRegistry
-from universal_mcp.agents.
+from universal_mcp.agents.bigtool import BigToolAgent


 async def agent():
-    agent_object = await
+    agent_object = await BigToolAgent(
        registry=AgentrRegistry(),
    )._build_graph()
    return agent_object
universal_mcp/agents/bigtool/graph.py
CHANGED

@@ -7,12 +7,13 @@ from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, SystemMessage, ToolMessage
 from langchain_core.tools import BaseTool
 from langgraph.graph import StateGraph
-from langgraph.types import Command
+from langgraph.types import Command, RetryPolicy
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat

 from .state import State
 from .tools import get_valid_tools
+from universal_mcp.agents.utils import filter_retry_on

 load_dotenv()

@@ -31,7 +32,12 @@ def build_graph(

     # Combine meta tools with currently loaded tools
     if len(state["selected_tool_ids"]) > 0:
-
+        try:
+            current_tools = await registry.export_tools(
+                tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN
+            )
+        except Exception as e:
+            raise Exception(f"Failed to export selected tools: {e}")
     else:
         current_tools = []
     all_tools = (
@@ -48,23 +54,30 @@ def build_graph(
             seen_names.add(tool.name)
             unique_tools.append(tool)

-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        if isinstance(base_model, ChatAnthropic):
+            model_with_tools = base_model.bind_tools(
+                unique_tools,
+                tool_choice="auto",
+                parallel_tool_calls=False,
+                cache_control={"type": "ephemeral", "ttl": "1h"},
+            )
+        else:
+            model_with_tools = base_model.bind_tools(
+                unique_tools,
+                tool_choice="auto",
+                parallel_tool_calls=False,
+            )
+    except Exception as e:
+        raise Exception(f"Failed to bind tools to model: {e}")

     # Get response from model
     messages = [SystemMessage(content=system_prompt), *state["messages"]]
-
+
+    try:
+        response = cast(AIMessage, await model_with_tools.ainvoke(messages))
+    except Exception as e:
+        raise Exception(f"Model invocation failed: {e}")

     if response.tool_calls:
         return Command(goto="execute_tools", update={"messages": [response]})
@@ -78,27 +91,40 @@ def build_graph(

     tool_messages = []
     new_tool_ids = []
+    ask_user = False

     for tool_call in tool_calls:
-
-
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            if tool_call["name"] == "load_tools":  # Handle load_tools separately
+                valid_tools, unconnected_links = await get_valid_tools(tool_ids=tool_call["args"]["tool_ids"], registry=registry)
+                new_tool_ids.extend(valid_tools)
+                # Create tool message response
+                tool_result = f"Successfully loaded {len(valid_tools)} tools: {valid_tools}"
+                if unconnected_links:
+                    ask_user = True
+                    ai_msg = f"Please login to the following app(s) using the following links and let me know in order to proceed:\n {'\n'.join(unconnected_links)} "
+
+            elif tool_call["name"] == "search_tools":
+                tool_result = await meta_tools["search_tools"].ainvoke(tool_call["args"])
+            elif tool_call["name"] == "web_search":
+                tool_result = await meta_tools["web_search"].ainvoke(tool_call["args"])
+            else:
+                # Load tools first
+                await registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                tool_result = await registry.call_tool(tool_call["name"], tool_call["args"])
+        except Exception as e:
+            tool_result = f"Error during {tool_call}: {e}"
+
+
         tool_message = ToolMessage(
             content=json.dumps(tool_result),
             name=tool_call["name"],
             tool_call_id=tool_call["id"],
         )
         tool_messages.append(tool_message)
+    if ask_user:
+        tool_messages.append(AIMessage(content=ai_msg))
+        return Command(update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})

     return Command(goto="agent", update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})

@@ -106,8 +132,16 @@ def build_graph(
     workflow = StateGraph(State)

     # Add nodes
-    workflow.add_node(
-
+    workflow.add_node(
+        "agent",
+        agent_node,
+        retry_policy=RetryPolicy(max_attempts=3, retry_on=filter_retry_on, initial_interval=2, backoff_factor=2),
+    )
+    workflow.add_node(
+        "execute_tools",
+        execute_tools_node,
+        retry_policy=RetryPolicy(max_attempts=3, retry_on=filter_retry_on, initial_interval=2, backoff_factor=2),
+    )

     # Set entry point
     workflow.set_entry_point("agent")
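Both nodes are now wrapped in a `RetryPolicy` whose `retry_on` is the new `filter_retry_on` helper imported from `universal_mcp.agents.utils` (added in this release, +65 lines, not shown in this diff). A minimal sketch of how such a predicate plugs into langgraph's retry machinery, with the retry conditions assumed purely for illustration:

```python
# Hypothetical sketch: the real filter_retry_on lives in universal_mcp/agents/utils.py,
# which this diff does not display. langgraph's RetryPolicy.retry_on accepts a callable
# that receives the raised exception and returns True when the node should be retried.
import httpx
from langgraph.types import RetryPolicy


def filter_retry_on(exc: Exception) -> bool:
    # Assumed behaviour: retry transient transport errors, fail fast on everything else.
    return isinstance(exc, (httpx.ConnectError, httpx.ReadTimeout, TimeoutError, ConnectionError))


retry_policy = RetryPolicy(max_attempts=3, retry_on=filter_retry_on, initial_interval=2, backoff_factor=2)
```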
universal_mcp/agents/bigtool/prompts.py
CHANGED

@@ -5,9 +5,9 @@ SYSTEM_PROMPT = """You are a helpful AI assistant, called {name}.
 **Core Directives:**
 1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event, displaying a chart), you MUST use a tool. Do not refuse a task if a tool might exist for it.

-2. Check if your existing tools or knowledge can handle the user's request. If they can, use them. If they cannot, you must call the `search_tools` function to find the right tools for the user's request.You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks. If you do not find any specific relevant tools, use the pre-loaded generic tools.
+2. Check if your existing tools or knowledge can handle the user's request. If they can, use them. If they cannot, you must call the `search_tools` function to find the right tools for the user's request. You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks. If you do not find any specific relevant tools, use the pre-loaded generic tools. Only use `search_tools` if your existing capabilities cannot handle the request.

-3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Provide the full tool ids, not just the app names. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
+3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Provide the full tool ids, not just the app names. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user. Only load tools if your existing capabilities cannot handle the request.

 4. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and answer using existing tools/knowledge or `search_tools` with a concise query describing the core task. Do not engage in conversation, or extend the conversation beyond the user's request.

universal_mcp/agents/bigtool/tools.py
CHANGED

@@ -35,7 +35,8 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
         for tool in tools_list:
             app = tool["id"].split("__")[0]
             if len(app_tools[app]) < 5:
-
+                cleaned_desc = tool['description'].split("Context:")[0].strip()
+                app_tools[app].append(f"{tool['id']}: {cleaned_desc}")

         # Build result string efficiently
         result_parts = []
@@ -98,8 +99,13 @@ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
     return {"search_tools": search_tools, "load_tools": load_tools, "web_search": web_search}


-async def get_valid_tools(tool_ids: list[str], registry: ToolRegistry) -> list[str]:
+async def get_valid_tools(tool_ids: list[str], registry: ToolRegistry) -> tuple[list[str], list[str]]:
+    """For a given list of tool_ids, validates the tools and returns a list of links for the apps that have not been logged in"""
     correct, incorrect = [], []
+    connections = await registry.list_connected_apps()
+    connected_apps = {connection["app_id"] for connection in connections}
+    unconnected = set()
+    unconnected_links = []
     app_tool_list: dict[str, set[str]] = {}

     # Group tool_ids by app for fewer registry calls
@@ -127,15 +133,23 @@ async def get_valid_tools(tool_ids: list[str], registry: ToolRegistry) -> list[str]:
         app_tool_list[app] = tools

     # Validate tool_ids
-    for app, tool_entries in app_to_tools.items():
+    for app, tool_entries in app_to_tools.items():
         available = app_tool_list.get(app)
         if available is None:
             incorrect.extend(tool_id for tool_id, _ in tool_entries)
             continue
+        if app not in connected_apps and app not in unconnected:
+            unconnected.add(app)
+            text = registry.client.get_authorization_url(app)
+            start = text.find(":") + 1
+            end = text.find(".", start)
+            url = text[start:end].strip()
+            markdown_link = f"[{app}]({url})"
+            unconnected_links.append(markdown_link)
         for tool_id, tool_name in tool_entries:
             if tool_name in available:
                 correct.append(tool_id)
             else:
                 incorrect.append(tool_id)

-    return correct
+    return correct, unconnected_links
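Because `get_valid_tools` now returns a `(valid_tool_ids, unconnected_links)` tuple instead of a bare list, every caller has to unpack two values, as `execute_tools_node` in graph.py does above. A minimal call-site sketch, assuming the function lives in `universal_mcp.agents.bigtool.tools` as the file list suggests; the surrounding setup is illustrative only:

```python
# Sketch of the updated call shape, not taken from the package source.
from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.bigtool.tools import get_valid_tools


async def load_requested_tools(tool_ids: list[str]) -> list[str]:
    registry = AgentrRegistry()
    valid_tools, unconnected_links = await get_valid_tools(tool_ids=tool_ids, registry=registry)
    if unconnected_links:
        # Each entry is a markdown link such as "[google_mail](https://...)" pointing at the app's login page.
        print("Please connect the following apps first:\n" + "\n".join(unconnected_links))
    return valid_tools
```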
universal_mcp/agents/builder/__main__.py
CHANGED

@@ -1,12 +1,13 @@
 import asyncio
-import json
 from uuid import uuid4

 from langgraph.checkpoint.memory import MemorySaver
 from loguru import logger
 from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.types import ToolConfig

 from universal_mcp.agents.builder.builder import BuilderAgent
+from universal_mcp.agents.builder.state import Agent


 async def run_interactive_build():
@@ -18,7 +19,7 @@ async def run_interactive_build():
     agent = BuilderAgent(
         name="Builder Agent",
         instructions="You are a builder agent that creates other agents.",
-        model="
+        model="azure/gpt-4.1",
         registry=registry,
         memory=memory,
     )
@@ -27,30 +28,37 @@ async def run_interactive_build():

     conversation_script = [
         "Send an email to manoj@agentr.dev with the subject 'Hello' and body 'This is a test of the Gmail agent.' from my Gmail account.",
-        "
+        "Add the mail to my draft also",
+        "also make a reddit post on r/test with the title 'Test Post' and body 'This is a test post from the Reddit agent.'",
     ]

-
+    # These variables will hold the state between turns
+    latest_agent: Agent | None = None
+    latest_tools: ToolConfig | None = None
+
     for i, user_input in enumerate(conversation_script):
         logger.info(f"\n--- Conversation Turn {i + 1} ---")
         logger.info(f"User Request: '{user_input}'")

-
-
+        # The first turn is a new build (agent=None).
+        # Subsequent turns are modifications, passing the previously generated agent.
+        result = await agent.invoke(user_input=user_input, thread_id=thread_id, agent=latest_agent, tools=latest_tools)

-
-
+        # Update the latest state for the next turn
+        latest_agent = result.get("generated_agent")
+        latest_tools = result.get("tool_config")

-        if
+        if latest_agent:
             logger.info("--- Generated/Modified Agent ---")
-            logger.info(f"Name: {
-            logger.info(f"Description: {
-            logger.info(f"Expertise: {
-            logger.info(f"Instructions:\n{
+            logger.info(f"Name: {latest_agent.name}")
+            logger.info(f"Description: {latest_agent.description}")
+            logger.info(f"Expertise: {latest_agent.expertise}")
+            logger.info(f"Instructions:\n{latest_agent.instructions}")
+            logger.info(f"Schedule: {latest_agent.schedule}")

-        if
+        if latest_tools:
             logger.info("--- Selected Tools ---")
-            tools_str = "\n".join(f"- {app}: {', '.join(tool_ids)}" for app, tool_ids in
+            tools_str = "\n".join(f"- {app}: {', '.join(tool_ids)}" for app, tool_ids in latest_tools.items())
             logger.info(tools_str)
         else:
             logger.info("--- Selected Tools ---")
@@ -59,41 +67,57 @@ async def run_interactive_build():

 async def run_conversation_build():
     """Simulates a one-shot agent build from a conversation history payload."""
-    logger.info("\n\n--- SCENARIO
+    logger.info("\n\n--- SCENARIO 3: AGENT BUILD FROM CONVERSATION HISTORY ---")

     registry = AgentrRegistry()
     agent = BuilderAgent(
         name="Builder Agent",
         instructions="You build agents from conversation transcripts.",
-        model="
+        model="azure/gpt-4.1",
         registry=registry,
     )

     sample_conversation_history = [
+        {"type": "human", "content": "hi"},
+        {"type": "ai", "content": "Hello! How can I help you today?"},
+        {"type": "human", "content": "use the zenquotes tool to tell me a quote"},
+        {"type": "ai", "content": ""},
+        {
+            "type": "tool",
+            "content": "\"Tools from zenquotes (status: connected by user):\\n - zenquotes__get_random_quote: Fetches a random inspirational quote from the Zen Quotes API via an HTTP request. It parses the JSON response to extract the quote and author, returning them as a single formatted string ('quote - author'). This function is the primary tool provided by the ZenquotesApp.\\n - zenquotes__get_random_quote: Fetches a random inspirational quote from the Zen Quotes API via an HTTP request. It parses the JSON response to extract the quote and author, returning them as a single formatted string ('quote - author'). This function is the primary tool provided by the ZenquotesApp.\\n\\nTools from perplexity (status: NOT connected by user):\\n - perplexity__answer_with_search: Queries the Perplexity Chat Completions API for a web-search-grounded answer. It sends the user's prompt and model parameters to the `/chat/completions` endpoint, then parses the response to return the synthesized content and a list of supporting source citations, ideal for real-time information retrieval.\\n - perplexity__answer_with_search: Queries the Perplexity Chat Completions API for a web-search-grounded answer. It sends the user's prompt and model parameters to the `/chat/completions` endpoint, then parses the response to return the synthesized content and a list of supporting source citations, ideal for real-time information retrieval.\\n\\nCall load_tools to select the required tools only.\"",
+        },
+        {"type": "ai", "content": ""},
+        {"type": "tool", "content": "\"Successfully loaded 1 tools: ['zenquotes__get_random_quote']\"", "name": "zenquotes__get_random_quote"},
+        {"type": "ai", "content": ""},
         {
-            "type": "
-            "content": "
+            "type": "tool",
+            "content": '"Decide upon your major definite purpose in life and then organize all your activities around it. - Brian Tracy"',
         },
         {
             "type": "ai",
-            "content":
+            "content": 'Here’s your quote: \n**"Decide upon your major definite purpose in life and then organize all your activities around it." – Brian Tracy**',
         },
+        {"type": "human", "content": "send this quote to ankit@agentr.dev using gmail"},
+        {"type": "ai", "content": ""},
         {
-            "type": "
-            "content": "
+            "type": "tool",
+            "content": '"Tools from google_mail (status: connected by user):\\n - google_mail__send_email: Composes and immediately sends an email message via the Gmail API. It can function as a reply within an existing conversation if a `thread_id` is provided. This action is distinct from `send_draft`, which sends a previously saved draft message, or `create_draft`, which only saves an email.\\n - google_mail__send_draft: Sends a pre-existing Gmail draft identified by its unique ID. It posts to the `/drafts/send` endpoint, converting a saved draft into a sent message. This function acts on drafts from `create_draft` and differs from `send_email`, which composes and sends an email in one step.\\n - google_mail__create_draft: Saves a new email draft in Gmail with a specified recipient, subject, and body. An optional thread ID can create the draft as a reply within an existing conversation, distinguishing it from `send_email`, which sends immediately.\\n - google_mail__get_draft: Retrieves a specific Gmail draft by its unique ID. This function allows specifying the output format (e.g., full, raw) to control the response detail. Unlike `list_drafts`, it fetches a single, known draft rather than a collection of multiple drafts.\\n\\nCall load_tools to select the required tools only."',
         },
-        {"type": "ai", "content": "
+        {"type": "ai", "content": ""},
+        {"type": "tool", "content": "\"Successfully loaded 1 tools: ['google_mail__send_email']\"", "name": "google_mail__send_email"},
+        {"type": "ai", "content": ""},
+        {"type": "tool", "content": '{"id": "199765690b278b56", "threadId": "199765690b278b56", "labelIds": ["SENT"]}'},
+        {"type": "ai", "content": "The quote has been sent to **ankit@agentr.dev** successfully. ✅"},
     ]
-    sample_tool_config = {"github": ["get_pull_requests"], "google_docs": ["create_document"]}
-    wingman_payload = {"conversation_history": sample_conversation_history, "tool_config": sample_tool_config}

     logger.info(f"Payload Conversation History Length: {len(sample_conversation_history)} messages")
-    logger.info(f"Payload Tools Provided: {list(sample_tool_config.keys())}")

-    # The payload must be passed as a JSON string in the 'user_input'
-    payload_str = json.dumps(wingman_payload)
     thread_id = str(uuid4())
-    result = await agent.invoke(
+    result = await agent.invoke(
+        thread_id=thread_id,
+        user_input="Generate an agent from the provided conversation.",  # This input is for logging/tracing
+        messages=sample_conversation_history,
+    )

     generated_agent = result.get("generated_agent")
     tool_config = result.get("tool_config")
@@ -116,10 +140,61 @@ async def run_conversation_build():
         logger.error("Error: Tool configuration is missing.")


+async def run_modification_with_manual_tool():
+    """
+    Simulates a scenario where a user manually adds a tool to an agent's
+    configuration, and then uses the builder to modify the agent for a
+    different reason, expecting the manually added tool to be preserved.
+    """
+    logger.info("\n\n--- SCENARIO 2: MODIFY AGENT WITH MANUAL TOOL ADDITION ---")
+
+    registry = AgentrRegistry()
+    memory = MemorySaver()
+    agent = BuilderAgent(
+        name="Builder Agent",
+        instructions="You are a builder agent that creates other agents.",
+        model="azure/gpt-4.1",
+        registry=registry,
+        memory=memory,
+    )
+
+    thread_id = str(uuid4())
+
+    initial_request = "Send an email to manoj@agentr.dev with the subject 'Hello' using my Gmail account."
+    logger.info(f"User Request: '{initial_request}'")
+
+    initial_result = await agent.invoke(user_input=initial_request, thread_id=thread_id)
+    initial_agent = initial_result.get("generated_agent")
+    initial_tools = initial_result.get("tool_config")
+
+    tools_str = "\n".join(f"- {app}: {', '.join(tool_ids)}" for app, tool_ids in initial_tools.items())
+    logger.info(tools_str)
+
+    manually_modified_tools = initial_tools.copy()
+    manually_modified_tools["reddit"] = ["create_post"]
+    tools_str = "\n".join(f"- {app}: {', '.join(tool_ids)}" for app, tool_ids in manually_modified_tools.items())
+    logger.info(tools_str)
+
+    modification_request = "Also add the above email to my draft"
+    logger.info(f"User Request: '{modification_request}'")
+
+    final_result = await agent.invoke(
+        user_input=modification_request,
+        thread_id=thread_id,
+        agent=initial_agent,
+        tools=manually_modified_tools,
+    )
+
+    final_tools = final_result.get("tool_config")
+    tools_str = "\n".join(f"- {app}: {', '.join(tool_ids)}" for app, tool_ids in final_tools.items())
+    logger.info(tools_str)
+
+
 async def main():
     await run_interactive_build()
     await run_conversation_build()
+    await run_modification_with_manual_tool()


 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
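The interactive scenario above reads `name`, `description`, `expertise`, `instructions`, and `schedule` off the generated agent, and the new import pulls `Agent` from `universal_mcp.agents.builder.state` (state.py changed by +1 -1 in this release but is not shown here). A hypothetical shape consistent with the fields accessed above, purely as a sketch:

```python
# Hypothetical sketch only: the real Agent model is defined in
# universal_mcp/agents/builder/state.py, which this diff does not display.
from pydantic import BaseModel


class Agent(BaseModel):
    name: str
    description: str
    expertise: str
    instructions: str
    schedule: str | None = None  # assumed optional; only its presence is implied by the logging above
```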