universal-mcp-agents 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff shows the changes between publicly released versions of this package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of universal-mcp-agents might be problematic.

Files changed (49)
  1. universal_mcp/agents/__init__.py +1 -1
  2. universal_mcp/agents/base.py +3 -0
  3. universal_mcp/agents/bigtool/__init__.py +1 -1
  4. universal_mcp/agents/bigtool/__main__.py +4 -3
  5. universal_mcp/agents/bigtool/agent.py +3 -2
  6. universal_mcp/agents/bigtool/graph.py +68 -31
  7. universal_mcp/agents/bigtool/prompts.py +2 -2
  8. universal_mcp/agents/bigtool/tools.py +17 -4
  9. universal_mcp/agents/builder/__main__.py +129 -28
  10. universal_mcp/agents/builder/builder.py +149 -161
  11. universal_mcp/agents/builder/helper.py +71 -0
  12. universal_mcp/agents/builder/prompts.py +94 -160
  13. universal_mcp/agents/codeact0/__init__.py +2 -1
  14. universal_mcp/agents/codeact0/agent.py +13 -5
  15. universal_mcp/agents/codeact0/langgraph_agent.py +14 -0
  16. universal_mcp/agents/codeact0/llm_tool.py +1 -2
  17. universal_mcp/agents/codeact0/playbook_agent.py +353 -0
  18. universal_mcp/agents/codeact0/prompts.py +126 -41
  19. universal_mcp/agents/codeact0/sandbox.py +43 -32
  20. universal_mcp/agents/codeact0/state.py +27 -3
  21. universal_mcp/agents/codeact0/tools.py +180 -0
  22. universal_mcp/agents/codeact0/utils.py +89 -75
  23. universal_mcp/agents/shared/__main__.py +44 -0
  24. universal_mcp/agents/shared/prompts.py +49 -98
  25. universal_mcp/agents/shared/tool_node.py +160 -176
  26. universal_mcp/agents/utils.py +71 -0
  27. universal_mcp/applications/ui/app.py +2 -2
  28. {universal_mcp_agents-0.1.13.dist-info → universal_mcp_agents-0.1.15.dist-info}/METADATA +3 -3
  29. universal_mcp_agents-0.1.15.dist-info/RECORD +50 -0
  30. universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +0 -4
  31. universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +0 -10
  32. universal_mcp/agents/codeact0/usecases/11-github.yaml +0 -13
  33. universal_mcp/agents/codeact0/usecases/2-reddit.yaml +0 -27
  34. universal_mcp/agents/codeact0/usecases/2.1-instructions.md +0 -81
  35. universal_mcp/agents/codeact0/usecases/2.2-instructions.md +0 -71
  36. universal_mcp/agents/codeact0/usecases/3-earnings.yaml +0 -4
  37. universal_mcp/agents/codeact0/usecases/4-maps.yaml +0 -41
  38. universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +0 -8
  39. universal_mcp/agents/codeact0/usecases/6-contract.yaml +0 -6
  40. universal_mcp/agents/codeact0/usecases/7-overnight.yaml +0 -14
  41. universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +0 -25
  42. universal_mcp/agents/codeact0/usecases/9-learning.yaml +0 -9
  43. universal_mcp/agents/planner/__init__.py +0 -51
  44. universal_mcp/agents/planner/__main__.py +0 -28
  45. universal_mcp/agents/planner/graph.py +0 -85
  46. universal_mcp/agents/planner/prompts.py +0 -14
  47. universal_mcp/agents/planner/state.py +0 -11
  48. universal_mcp_agents-0.1.13.dist-info/RECORD +0 -63
  49. {universal_mcp_agents-0.1.13.dist-info → universal_mcp_agents-0.1.15.dist-info}/WHEEL +0 -0
universal_mcp/agents/codeact0/state.py
@@ -1,12 +1,36 @@
- from typing import Any
+ from typing import Annotated, Any
  
- from langgraph.graph import MessagesState
+ from langgraph.prebuilt.chat_agent_executor import AgentState
  
  
- class CodeActState(MessagesState):
+ def _enqueue(left: list, right: list) -> list:
+     """Treat left as a FIFO queue, append new items from right (preserve order),
+     keep items unique, and cap total size to 20 (drop oldest items)."""
+     max_size = 30
+     preferred_size = 20
+     if len(right) > preferred_size:
+         preferred_size = min(max_size, len(right))
+     queue = list(left or [])
+ 
+     for item in right[:preferred_size] or []:
+         if item in queue:
+             queue.remove(item)
+         queue.append(item)
+ 
+     if len(queue) > preferred_size:
+         queue = queue[-preferred_size:]
+ 
+     return queue
+ 
+ 
+ class CodeActState(AgentState):
      """State for CodeAct agent."""
  
      context: dict[str, Any]
      """Dictionary containing the execution context with available tools and variables."""
      add_context: dict[str, Any]
      """Dictionary containing the additional context (functions, classes, imports) to be added to the execution context."""
+     playbook_mode: str | None
+     """State for the playbook agent."""
+     selected_tool_ids: Annotated[list[str], _enqueue]
+     """Queue for tools exported from registry"""
universal_mcp/agents/codeact0/tools.py
@@ -0,0 +1,180 @@
+ import asyncio
+ from collections import defaultdict
+ from typing import Any
+ 
+ from langchain_core.tools import tool
+ from universal_mcp.tools.registry import ToolRegistry
+ 
+ MAX_LENGHT=100
+ 
+ def enter_playbook_mode():
+     """Call this function to enter playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
+     return
+ 
+ def exit_playbook_mode():
+     """Call this function to exit playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
+     return
+ 
+ 
+ 
+ def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
+     """Create the meta tools for searching and loading tools"""
+ 
+     @tool
+     async def search_functions(queries: list[str]) -> str:
+         """Search for relevant functions given list of queries.
+         Each single query should be atomic (doable with a single function).
+         For tasks requiring multiple functions, add separate queries for each subtask"""
+         try:
+             # Fetch all connections
+             connections = await tool_registry.list_connected_apps()
+             connected_apps = {connection["app_id"] for connection in connections}
+ 
+             # Use defaultdict to avoid key existence checks
+             app_tools = defaultdict(list)
+ 
+             # Process all queries concurrently
+             search_tasks = []
+             for query in queries:
+                 search_tasks.append(_search_query_tools(query))
+ 
+             query_results = await asyncio.gather(*search_tasks)
+ 
+             # Aggregate results with limit per app
+             for tools_list in query_results:
+                 for tool in tools_list:
+                     app = tool["id"].split("__")[0]
+                     if len(app_tools[app]) < MAX_LENGHT:
+                         cleaned_desc = tool["description"].split("Context:")[0].strip()
+                         app_tools[app].append(f"{tool['id']}: {cleaned_desc}")
+ 
+             # Build result string efficiently
+             result_parts = []
+             for app, tools in app_tools.items():
+                 app_status = "connected" if app in connected_apps else "NOT connected"
+                 result_parts.append(f"Tools from {app} (status: {app_status} by user):")
+                 for tool in tools:
+                     result_parts.append(f" - {tool}")
+                 result_parts.append("")  # Empty line between apps
+ 
+             result_parts.append("Call load_functions to select the required functions only.")
+             return "\n".join(result_parts)
+ 
+         except Exception as e:
+             return f"Error: {e}"
+ 
+     async def _search_query_tools(query: str) -> list[dict]:
+         """Helper function to search apps and tools for a single query."""
+         # Start both searches concurrently
+         tools_search_task = tool_registry.search_tools(query, limit=10)
+         apps_search_task = tool_registry.search_apps(query, limit=4)
+ 
+         # Wait for both to complete
+         tools_from_general_search, apps_list = await asyncio.gather(tools_search_task, apps_search_task)
+ 
+         # Create tasks for searching tools from each app
+         app_tool_tasks = [tool_registry.search_tools(query, limit=5, app_id=app["id"]) for app in apps_list]
+ 
+         # Wait for all app-specific tool searches to complete
+         app_tools_results = await asyncio.gather(*app_tool_tasks)
+ 
+         # Combine all results
+         tools_list = list(tools_from_general_search)
+         for app_tools in app_tools_results:
+             tools_list.extend(app_tools)
+ 
+         return tools_list
+ 
+     @tool
+     async def load_functions(tool_ids: list[str]) -> str:
+         """Load specific functions by their IDs for use in subsequent steps.
+ 
+         Args:
+             tool_ids: Function ids in the form 'app__function'. Example: 'google_mail__send_email'
+ 
+         Returns:
+             Confirmation message about loaded functions
+         """
+         return f"Successfully loaded {len(tool_ids)} functions: {tool_ids}"
+ 
+     @tool
+     async def web_search(query: str) -> list:
+         """Search the web for the given query and return structured search results.
+ 
+         Do not use for app-specific searches (for example, reddit or linkedin searches
+ 
+         Returns:
+             list: A list of up to 10 search result dictionaries, each containing:
+                 - id (str): Unique identifier, typically the URL
+                 - title (str): The title/headline of the search result
+                 - url (str): The web URL of the result
+                 - publishedDate (str): ISO 8601 formatted date (e.g., "2025-01-01T00:00:00.000Z")
+                 - author (str): Author name (may be empty string)
+                 - summary (str): Text summary/snippet of the content
+                 - image (str): URL to associated image (if available)
+ 
+         Example:
+             results = await web_search(query="python programming")
+         """
+         response = await tool_registry.call_tool(
+             "exa__search_with_filters", {"query": query, "contents": {"summary": True}}
+         )
+         return response["results"]
+ 
+     return {"search_functions": search_functions, "load_functions": load_functions, "web_search": web_search}
+ 
+ 
+ async def get_valid_tools(tool_ids: list[str], registry: ToolRegistry) -> tuple[list[str], list[str]]:
+     """For a given list of tool_ids, validates the tools and returns a list of links for the apps that have not been logged in"""
+     correct, incorrect = [], []
+     connections = await registry.list_connected_apps()
+     connected_apps = {connection["app_id"] for connection in connections}
+     unconnected = set()
+     unconnected_links = []
+     app_tool_list: dict[str, set[str]] = {}
+ 
+     # Group tool_ids by app for fewer registry calls
+     app_to_tools: dict[str, list[tuple[str, str]]] = {}
+     for tool_id in tool_ids:
+         if "__" not in tool_id:
+             incorrect.append(tool_id)
+             continue
+         app, tool_name = tool_id.split("__", 1)
+         app_to_tools.setdefault(app, []).append((tool_id, tool_name))
+ 
+     # Fetch all apps concurrently
+     async def fetch_tools(app: str):
+         try:
+             tools_dict = await registry.list_tools(app)
+             return app, {tool_unit["name"] for tool_unit in tools_dict}
+         except Exception:
+             return app, None
+ 
+     results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))
+ 
+     # Build map of available tools per app
+     for app, tools in results:
+         if tools is not None:
+             app_tool_list[app] = tools
+ 
+     # Validate tool_ids
+     for app, tool_entries in app_to_tools.items():
+         available = app_tool_list.get(app)
+         if available is None:
+             incorrect.extend(tool_id for tool_id, _ in tool_entries)
+             continue
+         if app not in connected_apps and app not in unconnected:
+             unconnected.add(app)
+             text = registry.client.get_authorization_url(app)
+             start = text.find(":") + 1
+             end = text.find(". R", start)
+             url = text[start:end].strip()
+             markdown_link = f"[{app}]({url})"
+             unconnected_links.append(markdown_link)
+         for tool_id, tool_name in tool_entries:
+             if tool_name in available:
+                 correct.append(tool_id)
+             else:
+                 incorrect.append(tool_id)
+ 
+     return correct, unconnected_links
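
A sketch of how these meta tools might be exercised outside of an agent run. AgentrRegistry is the registry used elsewhere in this release; its credentials are assumed to be configured in the environment, and the tool id below is only an example:

```python
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.codeact0.tools import create_meta_tools, get_valid_tools


async def main():
    registry = AgentrRegistry()
    meta = create_meta_tools(registry)

    # search_functions and load_functions are LangChain tools, so they are
    # invoked with .ainvoke and a dict of arguments.
    listing = await meta["search_functions"].ainvoke({"queries": ["send an email"]})
    print(listing)

    # Validate ids before exporting them to the sandbox; apps that are not
    # connected come back as markdown login links.
    valid, login_links = await get_valid_tools(["google_mail__send_email"], registry)
    print(valid, login_links)


asyncio.run(main())
```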
universal_mcp/agents/codeact0/utils.py
@@ -1,10 +1,12 @@
+ import ast
+ import importlib
  import re
  from collections.abc import Sequence
  from typing import Any
  
  from langchain_core.messages import BaseMessage
- from pydantic import ValidationError
- from requests import JSONDecodeError
+ 
+ MAX_CHARS = 300
  
  
  def light_copy(data):
@@ -15,13 +17,13 @@ def light_copy(data):
          data: Either a dictionary with string keys, or a sequence of such dictionaries
  
      Returns:
-         A deep copy where all string values are truncated to 30 characters
+         A deep copy where all string values are truncated to MAX_CHARS characters
      """
  
      def truncate_string(value):
-         """Truncate string to 30 chars, preserve other types"""
-         if isinstance(value, str) and len(value) > 30:
-             return value[:30] + "..."
+         """Truncate string to MAX_CHARS chars, preserve other types"""
+         if isinstance(value, str) and len(value) > MAX_CHARS:
+             return value[:MAX_CHARS] + "..."
          return value
  
      def copy_dict(d):
@@ -75,44 +77,6 @@ def make_safe_function_name(name: str) -> str:
      return safe_name
  
  
- def filter_retry_on(exc: Exception) -> bool:
-     import httpx
-     import requests
- 
-     if isinstance(
-         exc,
-         (
-             ConnectionError,
-             JSONDecodeError,
-             ValidationError,
-         ),
-     ):
-         return True
-     if isinstance(
-         exc,
-         (
-             ValueError,
-             TypeError,
-             ArithmeticError,
-             ImportError,
-             LookupError,
-             NameError,
-             SyntaxError,
-             RuntimeError,
-             ReferenceError,
-             StopIteration,
-             StopAsyncIteration,
-             OSError,
-         ),
-     ):
-         return False
-     if isinstance(exc, httpx.HTTPStatusError):
-         return 500 <= exc.response.status_code < 600
-     if isinstance(exc, requests.HTTPError):
-         return 500 <= exc.response.status_code < 600 if exc.response else True
-     return True
- 
- 
  def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
      """
      Derive context from code by extracting classes, functions, and import statements.
@@ -124,8 +88,6 @@ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
      Returns:
          Updated context dictionary with extracted entities
      """
-     import ast
-     import re
  
      # Initialize context keys if they don't exist
      if "imports" not in context:
@@ -177,19 +139,15 @@ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
              if class_def not in context["classes"]:
                  context["classes"].append(class_def)
  
-     # Extract function definitions (only top-level functions, not class methods)
+     # Extract function definitions (including async)
      for node in ast.walk(tree):
-         if isinstance(node, ast.FunctionDef):
-             # Get the function definition as a string
+         if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
              func_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
              func_def = "\n".join(func_lines)
  
-             # Check if this is a top-level function by looking at indentation
-             # Top-level functions should start at column 0 (no indentation)
+             # Only top-level functions (col_offset == 0)
              if node.col_offset == 0:
-                 # Clean up the function definition (remove leading/trailing whitespace)
                  func_def = func_def.strip()
- 
                  if func_def not in context["functions"]:
                      context["functions"].append(func_def)
  
@@ -208,8 +166,8 @@ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
              if "from" in pattern:
                  module = match.group(1)
                  imports = match.group(2).split(",")
-                 for imp in imports:
-                     imp = imp.strip()
+                 for import_name in imports:
+                     imp = import_name.strip()
                      if " as " in imp:
                          name, alias = imp.split(" as ")
                          import_stmt = f"from {module} import {name.strip()} as {alias.strip()}"
@@ -278,8 +236,6 @@ def inject_context(
          namespace = inject_context(context, existing_ns)
          # namespace will contain: {'math': <math module>, 'data': [1, 2, 3], 'pandas': <module>, 'pd': <module>, 'numpy': <module>, 'np': <module>, 'MyClass': <class>, 'MC': <class>, 'my_function': <function>, ...}
      """
-     import importlib
-     from typing import Any
  
      # Start with existing namespace or create new one
      namespace: dict[str, Any] = existing_namespace.copy() if existing_namespace is not None else {}
@@ -292,7 +248,6 @@ def inject_context(
                  exec(import_statement, namespace)
              except Exception as e:
                  # If execution fails, try to extract module name and create placeholder
-                 import re
  
                  # Handle different import patterns
                  import_match = re.search(r"import\s+(\w+)(?:\s+as\s+(\w+))?", import_statement)
@@ -323,7 +278,6 @@ def inject_context(
                  exec(class_definition, namespace)
              except Exception:
                  # If execution fails, try to extract class name and create placeholder
-                 import re
  
                  class_match = re.search(r"class\s+(\w+)", class_definition)
                  if class_match:
@@ -332,14 +286,14 @@ def inject_context(
                      # Create a placeholder class
                      class PlaceholderClass:
                          def __init__(self, *args, **kwargs):
-                             raise NotImplementedError(f"Class '{class_name}' failed to load: {str(e)}")
+                             raise NotImplementedError("Class '{class_name}' failed to load")
  
                      namespace[class_name] = PlaceholderClass
                  else:
                      # If we can't extract class name, create a generic placeholder
                      class GenericPlaceholderClass:
                          def __init__(self, *args, **kwargs):
-                             raise NotImplementedError(f"Class definition failed to load: {str(e)}")
+                             raise NotImplementedError("Class definition failed to load")
  
                      namespace[f"class_{len(namespace)}"] = GenericPlaceholderClass
  
  
@@ -351,24 +305,84 @@ def inject_context(
                  exec(function_definition, namespace)
              except Exception:
                  # If execution fails, try to extract function name and create placeholder
-                 import re
- 
-                 func_match = re.search(r"def\s+(\w+)", function_definition)
+                 func_match = re.search(r"(async\s+)?def\s+(\w+)", function_definition)
                  if func_match:
-                     func_name = func_match.group(1)
+                     func_name = func_match.group(2)
+                     is_async = bool(func_match.group(1))
+ 
+                     if is_async:
+ 
+                         async def placeholder_func(*args, **kwargs):
+                             raise NotImplementedError(f"Async function '{func_name}' failed to load")
+                     else:
  
-                     # Create a placeholder function
-                     def placeholder_func(*args, **kwargs):
-                         raise NotImplementedError(f"Function '{func_name}' failed to load: {str(e)}")
+                         def placeholder_func(*args, **kwargs):
+                             raise NotImplementedError(f"Function '{func_name}' failed to load")
  
                      placeholder_func.__name__ = func_name
                      namespace[func_name] = placeholder_func
-                 else:
-                     # If we can't extract function name, create a generic placeholder
-                     def generic_placeholder_func(*args, **kwargs):
-                         raise NotImplementedError(f"Function definition failed to load: {str(e)}")
- 
-                     generic_placeholder_func.__name__ = f"func_{len(namespace)}"
-                     namespace[generic_placeholder_func.__name__] = generic_placeholder_func
  
      return namespace
+ 
+ 
+ def schema_to_signature(schema: dict, func_name="my_function") -> str:
+     type_map = {
+         "integer": "int",
+         "string": "str",
+         "boolean": "bool",
+         "null": "None",
+     }
+ 
+     params = []
+     for name, meta in schema.items():
+         # figure out type
+         if "type" in meta:
+             typ = type_map.get(meta["type"], "Any")
+         elif "anyOf" in meta:
+             types = [type_map.get(t["type"], "Any") for t in meta["anyOf"]]
+             typ = " | ".join(set(types))
+         else:
+             typ = "Any"
+ 
+         default = meta.get("default", None)
+         default_repr = repr(default)
+ 
+         params.append(f"{name}: {typ} = {default_repr}")
+ 
+     # join into signature
+     param_str = ",\n    ".join(params)
+     return f"def {func_name}(\n    {param_str},\n):"
+ 
+ 
+ def smart_truncate(
+     output: str, max_chars_full: int = 2000, max_lines_headtail: int = 20, summary_threshold: int = 10000
+ ) -> str:
+     """
+     Truncates or summarizes output intelligently to avoid filling the context too fast.
+ 
+     Args:
+         output (str): The string output from code execution.
+         max_chars_full (int): Max characters to keep full output.
+         max_lines_headtail (int): Number of lines to keep from head and tail for medium outputs.
+         summary_threshold (int): If truncated output exceeds this, hard-truncate.
+ 
+     Returns:
+         str: Truncated or summarized output.
+     """
+     if len(output) <= max_chars_full:
+         return output  # Small output, include fully
+ 
+     lines = output.splitlines()
+     if len(lines) <= 2 * max_lines_headtail:
+         return output  # Medium output, include fully
+ 
+     # Medium-large output: take head + tail
+     head = "\n".join(lines[:max_lines_headtail])
+     tail = "\n".join(lines[-max_lines_headtail:])
+     truncated = f"{head}\n... [truncated {len(lines) - 2 * max_lines_headtail} lines] ...\n{tail}"
+ 
+     # If still too big, cut to summary threshold
+     if len(truncated) > summary_threshold:
+         truncated = truncated[:summary_threshold] + "\n... [output truncated to fit context] ..."
+ 
+     return truncated
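
The two new helpers are easy to exercise on their own. A quick sketch, assuming the module path from this wheel:

```python
from universal_mcp.agents.codeact0.utils import schema_to_signature, smart_truncate

# Render a tool's JSON-schema parameters as a Python-style signature string.
sig = schema_to_signature({"query": {"type": "string", "default": None}}, func_name="search")
print(sig)

# Bound a large sandbox output: keep the first and last 20 lines and replace
# the middle with a "[truncated N lines]" marker.
big_output = "\n".join(f"row {i}" for i in range(10_000))
clipped = smart_truncate(big_output)
print(len(big_output), len(clipped))
print("[truncated" in clipped)  # True
```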
universal_mcp/agents/shared/__main__.py
@@ -0,0 +1,44 @@
+ import asyncio
+ 
+ from rich import print
+ from universal_mcp.agentr.registry import AgentrRegistry
+ from universal_mcp.logger import setup_logger
+ 
+ from universal_mcp.agents.llm import load_chat_model
+ from universal_mcp.agents.shared.tool_node import build_tool_node_graph
+ 
+ 
+ async def main():
+     """
+     An example of how to run the tool_node graph independently.
+     """
+     setup_logger(level="INFO")
+ 
+     user_input = "What are the topics of my meetings today from Google Calendar and who are the attendees? Give a 1-line context for each attendee using LinkedIn or web search."
+ 
+     print(f"▶️ User Task: [bold cyan]'{user_input}'[/bold cyan]\n")
+ 
+     llm = load_chat_model("azure/gpt-4.1", thinking=False)
+     registry = AgentrRegistry()
+ 
+     graph = build_tool_node_graph(llm=llm, registry=registry)
+ 
+     initial_state = {"original_task": user_input}
+ 
+     print("🚀 Invoking the tool selection graph...")
+     final_state = await graph.ainvoke(initial_state)
+ 
+     execution_plan = final_state.get("execution_plan")
+ 
+     print("\n[bold green]✅ Graph execution complete![/bold green]")
+     print("\n--- Final Execution Plan (Selected Tools) ---")
+     if execution_plan:
+         print(execution_plan)
+     else:
+         print("[bold red]No execution plan was created.[/bold red]")
+         if messages := final_state.get("messages"):
+             print(f"Final Message: {messages[-1].content}")
+ 
+ 
+ if __name__ == "__main__":
+     asyncio.run(main())