universal-mcp 0.1.24rc2__py3-none-any.whl → 0.1.24rc4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. universal_mcp/agentr/README.md +201 -0
  2. universal_mcp/agentr/__init__.py +6 -0
  3. universal_mcp/agentr/agentr.py +30 -0
  4. universal_mcp/{utils/agentr.py → agentr/client.py} +19 -3
  5. universal_mcp/agentr/integration.py +104 -0
  6. universal_mcp/agentr/registry.py +91 -0
  7. universal_mcp/agentr/server.py +51 -0
  8. universal_mcp/agents/__init__.py +6 -0
  9. universal_mcp/agents/auto.py +576 -0
  10. universal_mcp/agents/base.py +88 -0
  11. universal_mcp/agents/cli.py +27 -0
  12. universal_mcp/agents/codeact/__init__.py +243 -0
  13. universal_mcp/agents/codeact/sandbox.py +27 -0
  14. universal_mcp/agents/codeact/test.py +15 -0
  15. universal_mcp/agents/codeact/utils.py +61 -0
  16. universal_mcp/agents/hil.py +104 -0
  17. universal_mcp/agents/llm.py +10 -0
  18. universal_mcp/agents/react.py +58 -0
  19. universal_mcp/agents/simple.py +40 -0
  20. universal_mcp/agents/utils.py +111 -0
  21. universal_mcp/analytics.py +5 -7
  22. universal_mcp/applications/__init__.py +42 -75
  23. universal_mcp/applications/application.py +1 -1
  24. universal_mcp/applications/sample/app.py +245 -0
  25. universal_mcp/cli.py +10 -3
  26. universal_mcp/config.py +33 -7
  27. universal_mcp/exceptions.py +4 -0
  28. universal_mcp/integrations/__init__.py +0 -15
  29. universal_mcp/integrations/integration.py +9 -91
  30. universal_mcp/servers/__init__.py +2 -14
  31. universal_mcp/servers/server.py +10 -51
  32. universal_mcp/tools/__init__.py +3 -0
  33. universal_mcp/tools/adapters.py +20 -11
  34. universal_mcp/tools/manager.py +29 -56
  35. universal_mcp/tools/registry.py +41 -0
  36. universal_mcp/tools/tools.py +22 -1
  37. universal_mcp/types.py +10 -0
  38. universal_mcp/utils/common.py +245 -0
  39. universal_mcp/utils/openapi/api_generator.py +46 -18
  40. universal_mcp/utils/openapi/cli.py +445 -19
  41. universal_mcp/utils/openapi/openapi.py +284 -21
  42. universal_mcp/utils/openapi/postprocessor.py +275 -0
  43. universal_mcp/utils/openapi/preprocessor.py +1 -1
  44. universal_mcp/utils/openapi/test_generator.py +287 -0
  45. universal_mcp/utils/prompts.py +188 -341
  46. universal_mcp/utils/testing.py +190 -2
  47. {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc4.dist-info}/METADATA +17 -3
  48. universal_mcp-0.1.24rc4.dist-info/RECORD +71 -0
  49. universal_mcp/applications/sample_tool_app.py +0 -80
  50. universal_mcp/client/agents/__init__.py +0 -4
  51. universal_mcp/client/agents/base.py +0 -38
  52. universal_mcp/client/agents/llm.py +0 -115
  53. universal_mcp/client/agents/react.py +0 -67
  54. universal_mcp/client/cli.py +0 -181
  55. universal_mcp-0.1.24rc2.dist-info/RECORD +0 -53
  56. {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc4.dist-info}/WHEEL +0 -0
  57. {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc4.dist-info}/entry_points.txt +0 -0
  58. {universal_mcp-0.1.24rc2.dist-info → universal_mcp-0.1.24rc4.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,576 @@
1
+ import asyncio
2
+ import datetime
3
+ import os
4
+ from typing import Annotated, cast
5
+
6
+ from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
7
+ from langgraph.graph import END, START, StateGraph
8
+ from langgraph.graph.message import add_messages
9
+ from langgraph.prebuilt import create_react_agent
10
+ from loguru import logger
11
+ from pydantic import BaseModel
12
+ from typing_extensions import TypedDict
13
+
14
+ from universal_mcp.agentr.registry import AgentrRegistry
15
+ from universal_mcp.tools import ToolManager
16
+ from universal_mcp.tools.adapters import ToolFormat
17
+ from universal_mcp.tools.registry import ToolRegistry
18
+
19
+ from .base import BaseAgent
20
+ from .llm import get_llm
21
+
22
+ # Auto Agent
23
+ # Working
24
+ # 1. For every message, and given list of tools, figure out if external tools are needed
25
+ # 2. In case of extra tools needed, make a list of tools and send to subgraph
26
+ # 3. In case no tool needed forward to simple chatbot
27
+
28
+ # Subgraph
29
+ # In case extra tools are needed, ask for clarification from user what tools are required
30
+
31
+
32
class State(TypedDict):
    """Conversation state carried through the LangGraph graph."""

    # Chat history; the add_messages reducer appends on updates instead of replacing.
    messages: Annotated[list, add_messages]
    # App IDs whose tools have already been loaded this conversation.
    loaded_apps: list[str]
    # Pending app-choice payload awaiting user input, or None when nothing is pending.
    choice_data: dict | None
36
+
37
+
38
class AppSet(BaseModel):
    """Represents a set of apps for a specific purpose"""

    # Human-readable description of what this group of apps is for.
    purpose: str
    # App IDs that serve this purpose.
    apps: list[str]
    choice: bool  # Whether user choice is needed for this app set
44
+
45
+
46
class TaskAnalysis(BaseModel):
    """Combined analysis of task type and app requirements"""

    # True when the task needs an external application rather than pure reasoning.
    requires_app: bool
    # Model's explanation for the requires_app decision.
    reasoning: str
    app_sets: list[AppSet] = []  # Multiple sets of app choices with purpose and choice flags
52
+
53
+
54
class UserChoices(BaseModel):
    """Structured output for parsing user choice responses"""

    # App IDs the user selected; empty when nothing could be parsed.
    user_choices: list[str] = []
58
+
59
+
60
class AutoAgent(BaseAgent):
    """Agent that analyzes each task, decides whether external app tools are
    needed, loads the relevant tools (asking the user to choose between
    alternatives when necessary), and then executes the task with a ReAct agent.
    """

    def __init__(self, name: str, instructions: str, model: str, app_registry: ToolRegistry):
        """Initialize the agent.

        Args:
            name: Display name of the agent.
            instructions: Extra user-supplied system instructions.
            model: Model identifier passed to get_llm.
            app_registry: Registry used to list apps and load their tools.
        """
        super().__init__(name, instructions, model)
        self.app_registry = app_registry
        # Separate LLM handles with distinct tags; tags are used downstream
        # (e.g. stream()) to filter which chunks reach the user.
        self.llm_tools = get_llm(model, tags=["tools"])
        self.llm_choice = get_llm(model, tags=["choice"])
        self.llm_quiet = get_llm(model, tags=["quiet"])
        self.tool_manager = ToolManager()

        # System prompt for the combined task-analysis + app-selection LLM call.
        self.task_analysis_prompt = """You are a task analysis expert. Given a task description and available apps, determine:

1. Whether the task requires an external application or can be handled through general reasoning
2. If it requires an app, which apps are most relevant
3. If the task requires multiple different types of functionality, organize apps into logical sets with purposes

Tasks that typically require apps:
- Searching the web for information
- Sending emails
- Creating or editing documents
- Managing calendars or schedules
- Processing data or files
- Interacting with social media
- Making API calls to external services

Tasks that typically don't require apps:
- General reasoning and analysis
- Mathematical calculations
- Text summarization or analysis
- Providing explanations or educational content
- Planning and organization
- Creative writing or brainstorming
- Logical problem solving

For complex tasks that require multiple types of functionality, organize apps into logical sets with clear purposes.
For example, if a task requires both email and search functionality, you might create:
- app_sets: [
{"purpose": "Email communication", "apps": ["outlook", "google-mail"], "choice": true},
{"purpose": "Web search", "apps": ["serpapi", "tavily"], "choice": false}
]

Each app set should have:
- purpose: A clear description of what this set of apps is for
- apps: List of app IDs that serve this purpose
- choice: Boolean indicating if user choice is needed (true) or all apps should be auto-loaded (false)

Set choice to True if the user should choose from the apps in that set.
Set choice to False if all apps in that set should be automatically loaded.

Analyze the given task and determine if it requires an external app or can be completed through general reasoning.
If it requires an app, select the most relevant apps from the available list.
If the task requires multiple different types of functionality, organize apps into logical sets with clear purposes.

If an app has previously been loaded, it should not be loaded again.
"""
        logger.debug("AutoAgent initialized successfully")
        self._graph = self._build_graph()
116
+
117
    def _build_graph(self):
        """Build the two-node LangGraph.

        The graph routes each incoming message either to ``task_analyzer``
        (fresh request) or to ``choice_handler`` (a previous turn left
        ``choice_data`` pending and the new message is the user's answer).
        Both nodes terminate at END; state is checkpointed in self.memory.
        """
        graph_builder = StateGraph(State)

        async def task_analyzer(state: State):
            """Analyze the task and determine if choice is needed"""
            response = await self.run(state["messages"])

            # Get current loaded_apps from state, defaulting to empty list if not present
            current_loaded_apps = state.get("loaded_apps", [])

            # Check if the response is choice data (dict) or a direct response (str)
            if isinstance(response, dict) and "requires_app" in response:
                # This is choice data - store it and ask for user input
                app_sets = response.get("app_sets", [])

                # Use LLM to generate a natural choice message
                choice_message = await self._generate_choice_message(app_sets, response["task"])

                # Update loaded_apps with any auto-selected apps from the choice data
                if "auto_selected_apps" in response:
                    current_loaded_apps.extend(response["auto_selected_apps"])

                # Return the choice message; non-None choice_data routes the
                # NEXT user message into choice_handler.
                return {
                    "messages": [AIMessage(content=choice_message)],
                    "loaded_apps": current_loaded_apps,
                    "choice_data": response,
                }
            else:
                # This is a direct response
                return {
                    "messages": [AIMessage(content=str(response))],
                    "loaded_apps": current_loaded_apps,
                    "choice_data": None,
                }

        async def choice_handler(state: State):
            """Handle user choice input and execute with selected apps"""
            user_input = state["messages"][-1].content

            # Get current loaded_apps from state, defaulting to empty list if not present
            current_loaded_apps = state.get("loaded_apps", [])
            choice_data = state.get("choice_data")

            if not choice_data:
                # Should not normally happen (router only sends us here when
                # choice_data is set), but fail gracefully.
                return {
                    "messages": [AIMessage(content="No choice data available. Please try again.")],
                    "loaded_apps": current_loaded_apps,
                    "choice_data": None,
                }

            # Parse user choices using LLM
            user_choices = await self.parse_user_choices_with_llm(user_input, choice_data)

            # Execute with the parsed choices
            result = await self.run(state["messages"], user_choices=user_choices, loaded_apps=current_loaded_apps)

            # Update loaded_apps with the user-selected apps
            current_loaded_apps.extend(user_choices)

            # Clearing choice_data returns routing to task_analyzer next turn.
            return {
                "messages": [AIMessage(content=str(result))],
                "loaded_apps": current_loaded_apps,
                "choice_data": None,
            }

        graph_builder.add_node("task_analyzer", task_analyzer)
        graph_builder.add_node("choice_handler", choice_handler)

        # Add conditional edge from START to task_analyzer or choice_handler
        def route_from_start(state: State):
            # Check if we have stored choice data (indicating we need to handle choices)
            if state.get("choice_data") is not None:
                return "choice_handler"
            else:
                return "task_analyzer"

        graph_builder.add_conditional_edges(START, route_from_start)
        graph_builder.add_edge("task_analyzer", END)
        graph_builder.add_edge("choice_handler", END)

        return graph_builder.compile(checkpointer=self.memory)
199
+
200
    @property
    def graph(self):
        """Compiled LangGraph built once in __init__."""
        return self._graph
203
+
204
+ async def stream(self, thread_id: str, user_input: str):
205
+ async for event, metadata in self.graph.astream(
206
+ {"messages": [{"role": "user", "content": user_input}]},
207
+ config={"configurable": {"thread_id": thread_id}},
208
+ stream_mode="messages",
209
+ ):
210
+ logger.info(f"Stream event: {event}")
211
+ logger.info(f"Stream metadata: {metadata}")
212
+ if "tags" in metadata and "quiet" in metadata["tags"]:
213
+ pass
214
+ else:
215
+ event = cast(AIMessageChunk, event)
216
+ yield event
217
+
218
    async def get_app_details(self, app_ids: list[str]) -> list[dict]:
        """Get detailed information about apps for better choice presentation.

        Args:
            app_ids: App identifiers to look up in the registry.

        Returns:
            One dict per input id, in order. On lookup failure a placeholder
            entry is appended instead of raising.
        """
        app_details = []

        for app_id in app_ids:
            try:
                # Get app info from platform manager
                app_info = await self.app_registry.get_app_details(app_id)
                app_details.append(app_info)
            except Exception as e:
                logger.error(f"Error getting details for app {app_id}: {e}")
                # NOTE(review): the error placeholder is marked available=True,
                # so an app whose details failed to load can still be offered
                # to the user — confirm this optimistic fallback is intended.
                app_details.append(
                    {
                        "id": app_id,
                        "name": app_id,
                        "description": "Error loading details",
                        "category": "Unknown",
                        "available": True,
                    }
                )

        return app_details
240
+
241
    async def get_app_choice_data(self, app_sets: list[AppSet], messages: list[BaseMessage]) -> dict:
        """Get app choice data for frontend display.

        Sets that need no user decision (single available app, or
        ``choice=False``) are auto-loaded immediately; only sets that truly
        require a user decision are included in the returned payload.

        Args:
            app_sets: App groupings produced by task analysis.
            messages: Conversation; the last message's content is the task.

        Returns:
            Dict with keys "task", "app_sets" (sets awaiting user choice) and
            "auto_selected_apps" (already-loaded app ids, for state tracking).
        """
        task = messages[-1].content
        logger.info(f"Preparing app choice data for task: {task}")

        choice_data = {"task": task, "app_sets": []}

        # Load auto-selected apps immediately
        auto_selected_apps = []

        for set_index, app_set in enumerate(app_sets, 1):
            # Get detailed information about the apps in this set
            app_details = await self.get_app_details(app_set.apps)
            available_apps = [app for app in app_details if app.get("available", False)]

            if not available_apps:
                logger.warning(f"No available apps found in set {set_index}")
                continue

            if len(available_apps) == 1:
                # Only one available app, use it — no point asking the user.
                selected = available_apps[0]["id"]
                logger.info(f"Only one available app in set {set_index}: {selected}")
                auto_selected_apps.append(selected)
                continue

            if not app_set.choice:
                # Automatically load all apps in this set
                selected_apps = [app["id"] for app in available_apps]
                selected_names = [app["name"] for app in available_apps]
                logger.info(f"Automatically loading all apps in set {set_index}: {', '.join(selected_names)}")
                auto_selected_apps.extend(selected_apps)
                continue

            # Add this set to choice data for frontend
            set_data = {
                "set_index": set_index,
                "purpose": app_set.purpose,
                "apps": available_apps,
                "needs_choice": app_set.choice,
            }
            choice_data["app_sets"].append(set_data)

        # Load auto-selected apps immediately
        if auto_selected_apps:
            logger.info(f"Loading auto-selected apps: {', '.join(auto_selected_apps)}")
            await self._load_actions_for_apps(auto_selected_apps)

        logger.info(
            f"Prepared choice data with {len(choice_data['app_sets'])} sets and {len(auto_selected_apps)} auto-selected apps"
        )

        # Add auto-selected apps to the choice data for state tracking
        choice_data["auto_selected_apps"] = auto_selected_apps

        return choice_data
297
+
298
    async def _generate_choice_message(self, app_sets: list[dict], task: str) -> str:
        """Use LLM to generate a natural choice message for app selection.

        Args:
            app_sets: Frontend-shaped set dicts (purpose/apps) needing a choice.
            task: The user's task, quoted in the prompt for context.

        Returns:
            The message to show the user; falls back to a canned message when
            the LLM call fails.
        """
        if not app_sets:
            return "I need to load some apps to help with your request."

        # Format app sets for the LLM
        app_sets_info = []
        for i, app_set in enumerate(app_sets, 1):
            purpose = app_set.get("purpose", f"Set {i}")
            apps_info = []
            for app in app_set.get("apps", []):
                app_name = app.get("name", app.get("id"))
                app_desc = app.get("description", "No description")
                apps_info.append(f"- {app_name}: {app_desc}")

            app_sets_info.append(f"{purpose}:\n" + "\n".join(apps_info))

        app_sets_text = "\n\n".join(app_sets_info)

        prompt = f"""You are an agent capable of performing different actions to help the user. The user has asked you to perform a task, however, that is possible using multiple different apps. The task is:
{task}
The user has the following apps available to them, for performing the task they have asked for:

{app_sets_text}
The above may contain multiple sets of apps, each with a different purpose for performing the task. Now draft a message asking the user to select apps from each of these sets.
Be friendly and concise, but list each set of apps clearly. Do not return any other text than the question to be asked to the user, since it will be directly sent to the user. That is, do not start with "Here is the message to be sent to the user:" or anything like that."""

        try:
            # Quiet-tagged LLM so this call's chunks are filtered from streaming.
            response = await self.llm_quiet.ainvoke(prompt)
            return response.content
        except Exception as e:
            logger.error(f"Failed to generate choice message with LLM: {e}")
            # Fallback to a simple message
            if len(app_sets) == 1:
                purpose = app_sets[0].get("purpose", "this task")
                return (
                    f"I need to know which app you'd prefer to use for {purpose}. Please choose from the options above."
                )
            else:
                return "I need to load some apps to help with your request. Please let me know which apps you'd like me to use for each category."
338
+
339
    # TODO: Use a proper handler for this, the ui is going to send a proper json with choices

    async def parse_user_choices_with_llm(self, user_input: str, choice_data: dict) -> list[str]:
        """Use LLM to parse user choice input and return a list of selected app IDs.

        Args:
            user_input: Free-text answer the user gave to the choice prompt.
            choice_data: The pending choice payload (sets and their apps).

        Returns:
            Parsed app IDs; empty list when the LLM call fails.
        """
        logger.info(f"Using LLM to parse user choices: {user_input}")

        # Create a prompt for the LLM to parse the user's choice
        available_apps = []
        for i, app_set in enumerate(choice_data.get("app_sets", []), 1):
            purpose = app_set.get("purpose", f"Set {i}")
            available_apps.append(f"\n{purpose}:")
            for app in app_set.get("apps", []):
                available_apps.append(
                    f" - {app.get('name', app.get('id'))} (ID: {app.get('id')}) - {app.get('description', 'No description')}"
                )

        prompt = f"""
You are a choice parser. The user has been asked to choose from the following app sets:

Available apps:
{chr(10).join(available_apps)}

The user responded with: "{user_input}"

Please parse their response and extract their choices as a simple list of app IDs.

Rules:
1. Return only the app IDs that the user selected
2. If the user says "all" or "everything", include all apps from that set
3. If the user says "none" or "skip", don't include that set
4. Match app names as closely as possible to the available apps
5. If the user's response is unclear, make your best guess based on context
6. Return only the app IDs, not the full names
"""

        try:
            # Use structured output with Pydantic model
            structured_llm = self.llm_quiet.with_structured_output(UserChoices)
            parsed_choices = await structured_llm.ainvoke(prompt)

            logger.info(f"LLM parsed choices: {parsed_choices}")
            return parsed_choices.user_choices

        except Exception as e:
            logger.error(f"Failed to parse user choices with LLM: {e}")
            # Fallback to empty list
            return []
386
+
387
    async def load_action_for_app(self, app_id: str) -> None:
        """Load actions for an app using the platform manager.

        Registers the app's tools into self.tool_manager via the registry.
        """
        await self.app_registry.load_tools_for_app(app_id, self.tool_manager)
390
+
391
    async def analyze_task_and_select_apps(
        self,
        task: str,
        available_apps: list[dict],
        messages: list[BaseMessage] | None = None,
        loaded_apps: list[str] | None = None,
    ) -> TaskAnalysis:
        """Combined task analysis and app selection to reduce LLM calls.

        Args:
            task: The task extracted from the latest user message.
            available_apps: Apps the registry reports as available.
            messages: Full conversation, used to build a short context summary.
            loaded_apps: App IDs already loaded (so the LLM avoids reloading).

        Returns:
            Structured TaskAnalysis (requires_app, reasoning, app_sets).
        """
        logger.info(f"Analyzing task and selecting apps: {task}")

        # Handle mutable default argument
        if loaded_apps is None:
            loaded_apps = []

        # Get conversation context from messages
        context_summary = ""

        if messages and len(messages) > 1:  # More than just the current task
            # Create a summary of previous conversation context
            previous_messages = messages[:-1]  # Exclude current task
            context_messages = []

            for msg in previous_messages[-5:]:  # Last 5 messages for context
                if isinstance(msg, HumanMessage):
                    context_messages.append(f"User: {msg.content}")
                elif isinstance(msg, AIMessage):
                    context_messages.append(f"Assistant: {msg.content[:200]}...")  # Truncate long responses

            if context_messages:
                context_summary = "\n\nPrevious conversation context:\n" + "\n".join(context_messages)
                logger.debug(f"Adding conversation context: {len(context_messages)} previous messages")

        prompt = f"""
{self.task_analysis_prompt}

Task: {task}
Available apps: {available_apps}{context_summary}

Determine if this task requires an external application or can be completed through general reasoning and knowledge.
If it requires an app, select the most relevant apps from the available list.
If the task requires multiple different types of functionality, organize apps into logical sets with clear purposes using the app_sets field.

Consider the conversation context when making your decision. For example:
- If the user previously mentioned specific apps or tools, prefer those
- If the conversation is about a specific topic, choose apps relevant to that topic
- If the user is continuing a previous task, maintain consistency in app selection. You do not need to load the same app again.
The set of loaded apps is {loaded_apps}
"""

        # Use structured output with Pydantic model
        structured_llm = self.llm_quiet.with_structured_output(TaskAnalysis)
        response = await structured_llm.ainvoke(prompt)
        logger.debug(f"Task analysis response: {response}")

        logger.info(f"Task requires app: {response.requires_app}")
        logger.info(f"Reasoning: {response.reasoning}")
        if response.requires_app:
            logger.info(f"App sets: {response.app_sets}")

        return response
451
+
452
+ async def _load_actions_for_apps(self, selected_apps: list[str]) -> None:
453
+ """Load actions for a list of apps"""
454
+ for app_id in selected_apps:
455
+ logger.info(f"Loading actions for app: {app_id}")
456
+ try:
457
+ await self.load_action_for_app(app_id)
458
+ logger.info(f"Successfully loaded actions for app: {app_id}")
459
+ except Exception as e:
460
+ logger.error(f"Failed to load actions for app {app_id}: {e}")
461
+ continue
462
+
463
    async def _execute_with_selected_apps(self, selected_apps: list[str], messages: list[BaseMessage] | None = None) -> str:
        """Load selected apps and execute the task, falling back to general reasoning if needed.

        Args:
            selected_apps: App IDs to load before execution; empty means no tools.
            messages: Conversation passed to the ReAct agent.

        Returns:
            The agent's final answer content.
        """
        if not selected_apps:
            logger.warning("No apps selected, using general reasoning")
            return await self._execute_task_with_agent(messages or [])

        await self._load_actions_for_apps(selected_apps)

        logger.info(f"Successfully loaded actions for {len(selected_apps)} apps: {', '.join(selected_apps)}")
        return await self._execute_task_with_agent(messages or [])
473
+
474
    async def _execute_task_with_agent(self, messages: list[BaseMessage]) -> str:
        """Execute a task using the current agent with provided messages.

        Builds a fresh ReAct agent over the currently loaded tools and returns
        the content of its final message.
        """
        agent = self.get_agent()
        results = await agent.ainvoke({"messages": messages})
        ai_message = results["messages"][-1]
        return ai_message.content
480
+
481
    def get_agent(self):
        """Get or create an agent with tools.

        Always builds a new ReAct agent so that tools loaded since the last
        call are picked up.
        """
        # Always create a new agent when requested or if no agent exists

        logger.info("Creating new agent with tools")
        tools = self.tool_manager.list_tools(format=ToolFormat.LANGCHAIN)
        logger.debug(f"Created agent with {len(tools)} tools")

        # Get current datetime and timezone information
        # NOTE: datetime.UTC requires Python 3.11+.
        current_time = datetime.datetime.now()
        utc_time = datetime.datetime.now(datetime.UTC)
        timezone_info = f"Current local time: {current_time.strftime('%Y-%m-%d %H:%M:%S')} | UTC time: {utc_time.strftime('%Y-%m-%d %H:%M:%S')}"

        agent = create_react_agent(
            self.llm_tools,
            tools=tools,
            prompt=f"You are a helpful assistant that is given a list of actions for an app. You are also given a task. Use the tools to complete the task. Current time information: {timezone_info}. Additionally, the following instructions have been given by the user: {self.instructions}",
        )
        logger.info("Agent created successfully")

        return agent
502
+
503
+ async def run(
504
+ self, messages: list[BaseMessage], user_choices: list[str] | None = None, loaded_apps: list[str] | None = None
505
+ ):
506
+ # Extract task from the last message
507
+ if not messages or len(messages) == 0:
508
+ raise ValueError("No messages provided")
509
+
510
+ # Handle mutable default argument
511
+ if loaded_apps is None:
512
+ loaded_apps = []
513
+
514
+ task = messages[-1].content
515
+ logger.info(f"Starting task execution: {task}")
516
+
517
+ # If user_choices are provided, skip task analysis and execute directly
518
+ if user_choices:
519
+ logger.info("User choices provided, skipping task analysis")
520
+ logger.info(f"User selected apps: {', '.join(user_choices)}")
521
+ result = await self._execute_with_selected_apps(user_choices, messages)
522
+ return result
523
+
524
+ # Get all available apps from platform manager
525
+ available_apps = await self.app_registry.list_apps()
526
+
527
+ logger.info(f"Found {len(available_apps)} available apps")
528
+
529
+ # Analyze task and select apps
530
+ task_analysis = await self.analyze_task_and_select_apps(task, available_apps, messages, loaded_apps)
531
+
532
+ if not task_analysis.requires_app:
533
+ logger.info("Task does not require an app, using general reasoning")
534
+ return await self._execute_task_with_agent(messages or [])
535
+
536
+ if not task_analysis.app_sets:
537
+ logger.warning(f"No suitable app found for task: {task}")
538
+ logger.info("Falling back to general reasoning for this task")
539
+ return await self._execute_task_with_agent(messages or [])
540
+
541
+ # Check if choices are required
542
+ choice_data = await self.get_app_choice_data(task_analysis.app_sets, messages)
543
+ choice_data["requires_app"] = True
544
+ choice_data["reasoning"] = task_analysis.reasoning
545
+ choice_data["task"] = task
546
+
547
+ # If no choices are needed (all apps auto-selected), execute directly
548
+ if not choice_data["app_sets"]:
549
+ logger.info("No user choices required, auto-selected apps already loaded")
550
+ return await self._execute_task_with_agent(messages)
551
+
552
+ logger.info("User choices required, providing choice data")
553
+ return choice_data
554
+
555
+
556
+ if __name__ == "__main__":
557
+ # Test the AutoAgent
558
+
559
+ # Get API key from environment or use a placeholder
560
+ agentr_api_key = os.getenv("AGENTR_API_KEY", "test_api_key")
561
+ if not agentr_api_key:
562
+ agentr_api_key = input("Enter your API key: ")
563
+
564
+ # Create platform manager
565
+ app_registry = AgentrRegistry(api_key=agentr_api_key)
566
+ want_instructions = input("Do you want to add a system prompt/instructions? (Y/N)")
567
+ instructions = "" if want_instructions.upper() == "N" else input("Enter your instructions/system prompt: ")
568
+
569
+ agent = AutoAgent("Auto Agent", instructions, "gpt-4.1", app_registry=app_registry)
570
+
571
+ print("AutoAgent created successfully!")
572
+ print(f"Agent name: {agent.name}")
573
+ print(f"Agent instructions: {agent.instructions}")
574
+ print(f"Agent model: {agent.model}")
575
+
576
+ asyncio.run(agent.run_interactive())
@@ -0,0 +1,88 @@
1
+ # agents/base.py
2
+ from typing import cast
3
+ from uuid import uuid4
4
+
5
+ from langchain_core.messages import AIMessageChunk
6
+ from langgraph.checkpoint.memory import MemorySaver
7
+ from langgraph.types import Command
8
+
9
+ from .utils import RichCLI
10
+
11
+
12
class BaseAgent:
    """Base class for interactive CLI agents.

    Owns the checkpoint memory and Rich-based CLI; subclasses supply the
    compiled graph via the ``graph`` property.
    """

    def __init__(self, name: str, instructions: str, model: str):
        """Store identity/config and create per-agent memory and CLI helpers."""
        self.name = name
        self.instructions = instructions
        self.model = model
        # In-memory checkpointer used by the compiled graph for thread state.
        self.memory = MemorySaver()
        self.cli = RichCLI()
19
+
20
    @property
    def graph(self):
        """Compiled LangGraph; must be provided by subclasses."""
        raise NotImplementedError("Subclasses must implement this method")
23
+
24
    async def stream(self, thread_id: str, user_input: str):
        """Yield AI message chunks for ``user_input`` on the given thread."""
        async for event, _ in self.graph.astream(
            {"messages": [{"role": "user", "content": user_input}]},
            config={"configurable": {"thread_id": thread_id}},
            stream_mode="messages",
        ):
            event = cast(AIMessageChunk, event)
            yield event
32
+
33
    async def stream_interactive(self, thread_id: str, user_input: str):
        """Stream a response into the CLI's live-updating display."""
        with self.cli.display_agent_response_streaming(self.name) as stream_updater:
            async for event in self.stream(thread_id, user_input):
                stream_updater.update(event.content)
37
+
38
    async def process_command(self, command: str) -> bool | None:
        """Process a command from the user"""
        # No-op hook (implicitly returns None); subclasses may override to
        # handle additional slash-commands.
40
+
41
+ async def run_interactive(self, thread_id: str = str(uuid4())):
42
+ """Main application loop"""
43
+
44
+ # Display welcome
45
+ self.cli.display_welcome(self.name)
46
+
47
+ # Main loop
48
+ while True:
49
+ try:
50
+ state = self.graph.get_state(config={"configurable": {"thread_id": thread_id}})
51
+ if state.interrupts:
52
+ value = self.cli.handle_interrupt(state.interrupts[0])
53
+ self.graph.invoke(Command(resume=value), config={"configurable": {"thread_id": thread_id}})
54
+ continue
55
+
56
+ user_input = self.cli.get_user_input()
57
+ if not user_input.strip():
58
+ continue
59
+
60
+ # Process commands
61
+ if user_input.startswith("/"):
62
+ command = user_input.lower().lstrip("/")
63
+ if command == "about":
64
+ self.cli.display_info(f"Agent is {self.name}. {self.instructions}")
65
+ continue
66
+ elif command == "exit" or command == "quit" or command == "q":
67
+ self.cli.display_info("Goodbye! 👋")
68
+ break
69
+ elif command == "reset":
70
+ self.cli.clear_screen()
71
+ self.cli.display_info("Resetting agent...")
72
+ thread_id = str(uuid4())
73
+ continue
74
+ elif command == "help":
75
+ self.cli.display_info("Available commands: /about, /exit, /quit, /q, /reset")
76
+ continue
77
+ else:
78
+ self.cli.display_error(f"Unknown command: {command}")
79
+ continue
80
+
81
+ # Process with agent
82
+ await self.stream_interactive(thread_id, user_input)
83
+
84
+ except KeyboardInterrupt:
85
+ self.cli.display_info("\nGoodbye! 👋")
86
+ break
87
+ except Exception as e:
88
+ self.cli.display_error(f"An error occurred: {str(e)}")
@@ -0,0 +1,27 @@
1
from typer import Typer

from universal_mcp.agents import ReactAgent
from universal_mcp.logger import setup_logger

# Typer application exposing the agent CLI entry point.
app = Typer()


@app.command(
    help="Run the agent CLI",
    epilog="""
Example:
mcp client run --config client_config.json
""",
)
def run():
    """Run the agent CLI"""
    # NOTE(review): the epilog example shows a --config option, but this
    # command accepts no options and hard-codes the agent — confirm the
    # intended usage string.
    import asyncio

    # Keep console output minimal; agent interaction uses the Rich CLI.
    setup_logger(log_file=None, level="WARNING")

    agent = ReactAgent("React Agent", "You are a helpful assistant", "openrouter/auto")
    asyncio.run(agent.run_interactive())


if __name__ == "__main__":
    app()