universal-mcp 0.1.24rc12__py3-none-any.whl → 0.1.24rc14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agentr/registry.py +6 -0
- universal_mcp/agents/__init__.py +5 -1
- universal_mcp/agents/autoagent/__init__.py +1 -2
- universal_mcp/agents/autoagent/__main__.py +8 -5
- universal_mcp/agents/autoagent/graph.py +4 -7
- universal_mcp/agents/autoagent/prompts.py +1 -1
- universal_mcp/agents/base.py +2 -4
- universal_mcp/agents/bigtool/__init__.py +54 -0
- universal_mcp/agents/bigtool/__main__.py +24 -0
- universal_mcp/agents/bigtool/context.py +24 -0
- universal_mcp/agents/bigtool/graph.py +166 -0
- universal_mcp/agents/bigtool/prompts.py +31 -0
- universal_mcp/agents/bigtool/state.py +27 -0
- universal_mcp/agents/bigtool2/__init__.py +53 -0
- universal_mcp/agents/bigtool2/__main__.py +24 -0
- universal_mcp/agents/bigtool2/agent.py +11 -0
- universal_mcp/agents/bigtool2/context.py +33 -0
- universal_mcp/agents/bigtool2/graph.py +169 -0
- universal_mcp/agents/bigtool2/prompts.py +12 -0
- universal_mcp/agents/bigtool2/state.py +27 -0
- universal_mcp/agents/builder.py +80 -0
- universal_mcp/agents/llm.py +23 -16
- universal_mcp/agents/planner/__init__.py +37 -0
- universal_mcp/agents/planner/__main__.py +24 -0
- universal_mcp/agents/planner/graph.py +82 -0
- universal_mcp/agents/planner/prompts.py +1 -0
- universal_mcp/agents/planner/state.py +12 -0
- universal_mcp/agents/shared/agent_node.py +34 -0
- universal_mcp/agents/shared/tool_node.py +235 -0
- universal_mcp/tools/registry.py +5 -0
- universal_mcp/types.py +5 -2
- {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/METADATA +3 -1
- {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/RECORD +36 -16
- universal_mcp/agents/auto.py +0 -575
- {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agents/auto.py
DELETED
@@ -1,575 +0,0 @@
import asyncio
import datetime
import os
from typing import Annotated, cast

from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import create_react_agent
from loguru import logger
from pydantic import BaseModel
from typing_extensions import TypedDict

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.base import BaseAgent
from universal_mcp.agents.llm import load_chat_model
from universal_mcp.tools import ToolManager
from universal_mcp.tools.adapters import ToolFormat
from universal_mcp.tools.registry import ToolRegistry

# Auto Agent
# Working
# 1. For every message, and given list of tools, figure out if external tools are needed
# 2. In case of extra tools needed, make a list of tools and send to subgraph
# 3. In case no tool needed forward to simple chatbot

# Subgraph
# In case extra tools are needed, ask for clarification from user what tools are required


class State(TypedDict):
    messages: Annotated[list, add_messages]
    loaded_apps: list[str]
    choice_data: dict | None


class AppSet(BaseModel):
    """Represents a set of apps for a specific purpose"""

    purpose: str
    apps: list[str]
    choice: bool  # Whether user choice is needed for this app set


class TaskAnalysis(BaseModel):
    """Combined analysis of task type and app requirements"""

    requires_app: bool
    reasoning: str
    app_sets: list[AppSet] = []  # Multiple sets of app choices with purpose and choice flags


class UserChoices(BaseModel):
    """Structured output for parsing user choice responses"""

    user_choices: list[str] = []


class AutoAgent(BaseAgent):
    def __init__(self, name: str, instructions: str, model: str, app_registry: ToolRegistry):
        super().__init__(name, instructions, model)
        self.app_registry = app_registry
        self.llm_tools = load_chat_model(model, tags=["tools"])
        self.llm_choice = load_chat_model(model, tags=["choice"])
        self.llm_quiet = load_chat_model(model, tags=["quiet"])
        self.tool_manager = ToolManager()

        self.task_analysis_prompt = """You are a task analysis expert. Given a task description and available apps, determine:

1. Whether the task requires an external application or can be handled through general reasoning
2. If it requires an app, which apps are most relevant
3. If the task requires multiple different types of functionality, organize apps into logical sets with purposes

Tasks that typically require apps:
- Searching the web for information
- Sending emails
- Creating or editing documents
- Managing calendars or schedules
- Processing data or files
- Interacting with social media
- Making API calls to external services

Tasks that typically don't require apps:
- General reasoning and analysis
- Mathematical calculations
- Text summarization or analysis
- Providing explanations or educational content
- Planning and organization
- Creative writing or brainstorming
- Logical problem solving

For complex tasks that require multiple types of functionality, organize apps into logical sets with clear purposes.
For example, if a task requires both email and search functionality, you might create:
- app_sets: [
    {"purpose": "Email communication", "apps": ["outlook", "google-mail"], "choice": true},
    {"purpose": "Web search", "apps": ["serpapi", "tavily"], "choice": false}
  ]

Each app set should have:
- purpose: A clear description of what this set of apps is for
- apps: List of app IDs that serve this purpose
- choice: Boolean indicating if user choice is needed (true) or all apps should be auto-loaded (false)

Set choice to True if the user should choose from the apps in that set.
Set choice to False if all apps in that set should be automatically loaded.

Analyze the given task and determine if it requires an external app or can be completed through general reasoning.
If it requires an app, select the most relevant apps from the available list.
If the task requires multiple different types of functionality, organize apps into logical sets with clear purposes.

If an app has previously been loaded, it should not be loaded again.
"""
        logger.debug("AutoAgent initialized successfully")
        self._graph = self._build_graph()

    def _build_graph(self):
        graph_builder = StateGraph(State)

        async def task_analyzer(state: State):
            """Analyze the task and determine if choice is needed"""
            response = await self.run(state["messages"])

            # Get current loaded_apps from state, defaulting to empty list if not present
            current_loaded_apps = state.get("loaded_apps", [])

            # Check if the response is choice data (dict) or a direct response (str)
            if isinstance(response, dict) and "requires_app" in response:
                # This is choice data - store it and ask for user input
                app_sets = response.get("app_sets", [])

                # Use LLM to generate a natural choice message
                choice_message = await self._generate_choice_message(app_sets, response["task"])

                # Update loaded_apps with any auto-selected apps from the choice data
                if "auto_selected_apps" in response:
                    current_loaded_apps.extend(response["auto_selected_apps"])

                # Return the choice message and signal to go to choice node
                return {
                    "messages": [AIMessage(content=choice_message)],
                    "loaded_apps": current_loaded_apps,
                    "choice_data": response,
                }
            else:
                # This is a direct response
                return {
                    "messages": [AIMessage(content=str(response))],
                    "loaded_apps": current_loaded_apps,
                    "choice_data": None,
                }

        async def choice_handler(state: State):
            """Handle user choice input and execute with selected apps"""
            user_input = state["messages"][-1].content

            # Get current loaded_apps from state, defaulting to empty list if not present
            current_loaded_apps = state.get("loaded_apps", [])
            choice_data = state.get("choice_data")

            if not choice_data:
                return {
                    "messages": [AIMessage(content="No choice data available. Please try again.")],
                    "loaded_apps": current_loaded_apps,
                    "choice_data": None,
                }

            # Parse user choices using LLM
            user_choices = await self.parse_user_choices_with_llm(user_input, choice_data)

            # Execute with the parsed choices
            result = await self.run(state["messages"], user_choices=user_choices, loaded_apps=current_loaded_apps)

            # Update loaded_apps with the user-selected apps
            current_loaded_apps.extend(user_choices)

            return {
                "messages": [AIMessage(content=str(result))],
                "loaded_apps": current_loaded_apps,
                "choice_data": None,
            }

        graph_builder.add_node("task_analyzer", task_analyzer)
        graph_builder.add_node("choice_handler", choice_handler)

        # Add conditional edge from START to task_analyzer or choice_handler
        def route_from_start(state: State):
            # Check if we have stored choice data (indicating we need to handle choices)
            if state.get("choice_data") is not None:
                return "choice_handler"
            else:
                return "task_analyzer"

        graph_builder.add_conditional_edges(START, route_from_start)
        graph_builder.add_edge("task_analyzer", END)
        graph_builder.add_edge("choice_handler", END)

        return graph_builder.compile(checkpointer=self.memory)

    @property
    def graph(self):
        return self._graph

    async def stream(self, thread_id: str, user_input: str):
        async for event, metadata in self.graph.astream(
            {"messages": [{"role": "user", "content": user_input}]},
            config={"configurable": {"thread_id": thread_id}},
            stream_mode="messages",
        ):
            logger.info(f"Stream event: {event}")
            logger.info(f"Stream metadata: {metadata}")
            if "tags" in metadata and "quiet" in metadata["tags"]:
                pass
            else:
                event = cast(AIMessageChunk, event)
                yield event

    async def get_app_details(self, app_ids: list[str]) -> list[dict]:
        """Get detailed information about apps for better choice presentation"""
        app_details = []

        for app_id in app_ids:
            try:
                # Get app info from platform manager
                app_info = await self.app_registry.get_app_details(app_id)
                app_details.append(app_info)
            except Exception as e:
                logger.error(f"Error getting details for app {app_id}: {e}")
                app_details.append(
                    {
                        "id": app_id,
                        "name": app_id,
                        "description": "Error loading details",
                        "category": "Unknown",
                        "available": True,
                    }
                )

        return app_details

    async def get_app_choice_data(self, app_sets: list[AppSet], messages: list[BaseMessage]) -> dict:
        """Get app choice data for frontend display"""
        task = messages[-1].content
        logger.info(f"Preparing app choice data for task: {task}")

        choice_data = {"task": task, "app_sets": []}

        # Load auto-selected apps immediately
        auto_selected_apps = []

        for set_index, app_set in enumerate(app_sets, 1):
            # Get detailed information about the apps in this set
            app_details = await self.get_app_details(app_set.apps)
            available_apps = [app for app in app_details if app.get("available", False)]

            if not available_apps:
                logger.warning(f"No available apps found in set {set_index}")
                continue

            if len(available_apps) == 1:
                # Only one available app, use it
                selected = available_apps[0]["id"]
                logger.info(f"Only one available app in set {set_index}: {selected}")
                auto_selected_apps.append(selected)
                continue

            if not app_set.choice:
                # Automatically load all apps in this set
                selected_apps = [app["id"] for app in available_apps]
                selected_names = [app["name"] for app in available_apps]
                logger.info(f"Automatically loading all apps in set {set_index}: {', '.join(selected_names)}")
                auto_selected_apps.extend(selected_apps)
                continue

            # Add this set to choice data for frontend
            set_data = {
                "set_index": set_index,
                "purpose": app_set.purpose,
                "apps": available_apps,
                "needs_choice": app_set.choice,
            }
            choice_data["app_sets"].append(set_data)

        # Load auto-selected apps immediately
        if auto_selected_apps:
            logger.info(f"Loading auto-selected apps: {', '.join(auto_selected_apps)}")
            await self._load_actions_for_apps(auto_selected_apps)

        logger.info(
            f"Prepared choice data with {len(choice_data['app_sets'])} sets and {len(auto_selected_apps)} auto-selected apps"
        )

        # Add auto-selected apps to the choice data for state tracking
        choice_data["auto_selected_apps"] = auto_selected_apps

        return choice_data

    async def _generate_choice_message(self, app_sets: list[dict], task: str) -> str:
        """Use LLM to generate a natural choice message for app selection"""
        if not app_sets:
            return "I need to load some apps to help with your request."

        # Format app sets for the LLM
        app_sets_info = []
        for i, app_set in enumerate(app_sets, 1):
            purpose = app_set.get("purpose", f"Set {i}")
            apps_info = []
            for app in app_set.get("apps", []):
                app_name = app.get("name", app.get("id"))
                app_desc = app.get("description", "No description")
                apps_info.append(f"- {app_name}: {app_desc}")

            app_sets_info.append(f"{purpose}:\n" + "\n".join(apps_info))

        app_sets_text = "\n\n".join(app_sets_info)

        prompt = f"""You are an agent capable of performing different actions to help the user. The user has asked you to perform a task, however, that is possible using multiple different apps. The task is:
{task}
The user has the following apps available to them, for performing the task they have asked for:

{app_sets_text}
The above may contain multiple sets of apps, each with a different purpose for performing the task. Now draft a message asking the user to select apps from each of these sets.
Be friendly and concise, but list each set of apps clearly. Do not return any other text than the question to be asked to the user, since it will be directly sent to the user. That is, do not start with "Here is the message to be sent to the user:" or anything like that."""

        try:
            response = await self.llm_quiet.ainvoke(prompt)
            return response.content
        except Exception as e:
            logger.error(f"Failed to generate choice message with LLM: {e}")
            # Fallback to a simple message
            if len(app_sets) == 1:
                purpose = app_sets[0].get("purpose", "this task")
                return (
                    f"I need to know which app you'd prefer to use for {purpose}. Please choose from the options above."
                )
            else:
                return "I need to load some apps to help with your request. Please let me know which apps you'd like me to use for each category."

    # TODO: Use a proper handler for this, the ui is going to send a proper json with choices

    async def parse_user_choices_with_llm(self, user_input: str, choice_data: dict) -> list[str]:
        """Use LLM to parse user choice input and return a list of selected app IDs"""
        logger.info(f"Using LLM to parse user choices: {user_input}")

        # Create a prompt for the LLM to parse the user's choice
        available_apps = []
        for i, app_set in enumerate(choice_data.get("app_sets", []), 1):
            purpose = app_set.get("purpose", f"Set {i}")
            available_apps.append(f"\n{purpose}:")
            for app in app_set.get("apps", []):
                available_apps.append(
                    f" - {app.get('name', app.get('id'))} (ID: {app.get('id')}) - {app.get('description', 'No description')}"
                )

        prompt = f"""
You are a choice parser. The user has been asked to choose from the following app sets:

Available apps:
{chr(10).join(available_apps)}

The user responded with: "{user_input}"

Please parse their response and extract their choices as a simple list of app IDs.

Rules:
1. Return only the app IDs that the user selected
2. If the user says "all" or "everything", include all apps from that set
3. If the user says "none" or "skip", don't include that set
4. Match app names as closely as possible to the available apps
5. If the user's response is unclear, make your best guess based on context
6. Return only the app IDs, not the full names
"""

        try:
            # Use structured output with Pydantic model
            structured_llm = self.llm_quiet.with_structured_output(UserChoices)
            parsed_choices = await structured_llm.ainvoke(prompt)

            logger.info(f"LLM parsed choices: {parsed_choices}")
            return parsed_choices.user_choices

        except Exception as e:
            logger.error(f"Failed to parse user choices with LLM: {e}")
            # Fallback to empty list
            return []

    async def load_action_for_app(self, app_id):
        """Load actions for an app using the platform manager"""
        await self.app_registry.load_tools_for_app(app_id, self.tool_manager)

    async def analyze_task_and_select_apps(
        self,
        task: str,
        available_apps: list[dict],
        messages: list[BaseMessage] | None = None,
        loaded_apps: list[str] | None = None,
    ) -> TaskAnalysis:
        """Combined task analysis and app selection to reduce LLM calls"""
        logger.info(f"Analyzing task and selecting apps: {task}")

        # Handle mutable default argument
        if loaded_apps is None:
            loaded_apps = []

        # Get conversation context from messages
        context_summary = ""

        if messages and len(messages) > 1:  # More than just the current task
            # Create a summary of previous conversation context
            previous_messages = messages[:-1]  # Exclude current task
            context_messages = []

            for msg in previous_messages[-5:]:  # Last 5 messages for context
                if isinstance(msg, HumanMessage):
                    context_messages.append(f"User: {msg.content}")
                elif isinstance(msg, AIMessage):
                    context_messages.append(f"Assistant: {msg.content[:200]}...")  # Truncate long responses

            if context_messages:
                context_summary = "\n\nPrevious conversation context:\n" + "\n".join(context_messages)
                logger.debug(f"Adding conversation context: {len(context_messages)} previous messages")

        prompt = f"""
{self.task_analysis_prompt}

Task: {task}
Available apps: {available_apps}{context_summary}

Determine if this task requires an external application or can be completed through general reasoning and knowledge.
If it requires an app, select the most relevant apps from the available list.
If the task requires multiple different types of functionality, organize apps into logical sets with clear purposes using the app_sets field.

Consider the conversation context when making your decision. For example:
- If the user previously mentioned specific apps or tools, prefer those
- If the conversation is about a specific topic, choose apps relevant to that topic
- If the user is continuing a previous task, maintain consistency in app selection. You do not need to load the same app again.
The set of loaded apps is {loaded_apps}
"""

        # Use structured output with Pydantic model
        structured_llm = self.llm_quiet.with_structured_output(TaskAnalysis)
        response = await structured_llm.ainvoke(prompt)
        logger.debug(f"Task analysis response: {response}")

        logger.info(f"Task requires app: {response.requires_app}")
        logger.info(f"Reasoning: {response.reasoning}")
        if response.requires_app:
            logger.info(f"App sets: {response.app_sets}")

        return response

    async def _load_actions_for_apps(self, selected_apps: list[str]) -> None:
        """Load actions for a list of apps"""
        for app_id in selected_apps:
            logger.info(f"Loading actions for app: {app_id}")
            try:
                await self.load_action_for_app(app_id)
                logger.info(f"Successfully loaded actions for app: {app_id}")
            except Exception as e:
                logger.error(f"Failed to load actions for app {app_id}: {e}")
                continue

    async def _execute_with_selected_apps(self, selected_apps: list[str], messages: list[BaseMessage] = None) -> str:
        """Load selected apps and execute the task, falling back to general reasoning if needed"""
        if not selected_apps:
            logger.warning("No apps selected, using general reasoning")
            return await self._execute_task_with_agent(messages or [])

        await self._load_actions_for_apps(selected_apps)

        logger.info(f"Successfully loaded actions for {len(selected_apps)} apps: {', '.join(selected_apps)}")
        return await self._execute_task_with_agent(messages or [])

    async def _execute_task_with_agent(self, messages: list[BaseMessage]) -> str:
        """Execute a task using the current agent with provided messages"""
        agent = self.get_agent()
        results = await agent.ainvoke({"messages": messages})
        ai_message = results["messages"][-1]
        return ai_message.content

    def get_agent(self):
        """Get or create an agent with tools."""
        # Always create a new agent when requested or if no agent exists

        logger.info("Creating new agent with tools")
        tools = self.tool_manager.list_tools(format=ToolFormat.LANGCHAIN)
        logger.debug(f"Created agent with {len(tools)} tools")

        # Get current datetime and timezone information
        current_time = datetime.datetime.now()
        utc_time = datetime.datetime.now(datetime.UTC)
        timezone_info = f"Current local time: {current_time.strftime('%Y-%m-%d %H:%M:%S')} | UTC time: {utc_time.strftime('%Y-%m-%d %H:%M:%S')}"

        agent = create_react_agent(
            self.llm_tools,
            tools=tools,
            prompt=f"You are a helpful assistant that is given a list of actions for an app. You are also given a task. Use the tools to complete the task. Current time information: {timezone_info}. Additionally, the following instructions have been given by the user: {self.instructions}",
        )
        logger.info("Agent created successfully")

        return agent

    async def run(
        self, messages: list[BaseMessage], user_choices: list[str] | None = None, loaded_apps: list[str] | None = None
    ):
        # Extract task from the last message
        if not messages or len(messages) == 0:
            raise ValueError("No messages provided")

        # Handle mutable default argument
        if loaded_apps is None:
            loaded_apps = []

        task = messages[-1].content
        logger.info(f"Starting task execution: {task}")

        # If user_choices are provided, skip task analysis and execute directly
        if user_choices:
            logger.info("User choices provided, skipping task analysis")
            logger.info(f"User selected apps: {', '.join(user_choices)}")
            result = await self._execute_with_selected_apps(user_choices, messages)
            return result

        # Get all available apps from platform manager
        available_apps = self.app_registry.list_apps()

        logger.info(f"Found {len(available_apps)} available apps")

        # Analyze task and select apps
        task_analysis = await self.analyze_task_and_select_apps(task, available_apps, messages, loaded_apps)

        if not task_analysis.requires_app:
            logger.info("Task does not require an app, using general reasoning")
            return await self._execute_task_with_agent(messages or [])

        if not task_analysis.app_sets:
            logger.warning(f"No suitable app found for task: {task}")
            logger.info("Falling back to general reasoning for this task")
            return await self._execute_task_with_agent(messages or [])

        # Check if choices are required
        choice_data = await self.get_app_choice_data(task_analysis.app_sets, messages)
        choice_data["requires_app"] = True
        choice_data["reasoning"] = task_analysis.reasoning
        choice_data["task"] = task

        # If no choices are needed (all apps auto-selected), execute directly
        if not choice_data["app_sets"]:
            logger.info("No user choices required, auto-selected apps already loaded")
            return await self._execute_task_with_agent(messages)

        logger.info("User choices required, providing choice data")
        return choice_data


if __name__ == "__main__":
    # Test the AutoAgent

    # Get API key from environment or use a placeholder
    agentr_api_key = os.getenv("AGENTR_API_KEY", "test_api_key")
    if not agentr_api_key:
        agentr_api_key = input("Enter your API key: ")

    # Create platform manager
    app_registry = AgentrRegistry(api_key=agentr_api_key)
    want_instructions = input("Do you want to add a system prompt/instructions? (Y/N): ")
    instructions = "" if want_instructions.upper() == "N" else input("Enter your instructions/system prompt: ")

    agent = AutoAgent("Auto Agent", instructions, "azure/gpt-4.1", app_registry=app_registry)

    logger.info("AutoAgent created successfully!")
    logger.info(f"Agent name: {agent.name}")
    logger.info(f"Agent instructions: {agent.instructions}")
    logger.info(f"Agent model: {agent.model}")

    asyncio.run(agent.run_interactive())
{universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/WHEEL
File without changes
{universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/entry_points.txt
File without changes
{universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/licenses/LICENSE
File without changes