fast-agent-mcp 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/METADATA +5 -1
  2. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/RECORD +28 -17
  3. mcp_agent/agents/agent.py +46 -0
  4. mcp_agent/core/agent_app.py +373 -9
  5. mcp_agent/core/decorators.py +455 -0
  6. mcp_agent/core/enhanced_prompt.py +70 -4
  7. mcp_agent/core/factory.py +501 -0
  8. mcp_agent/core/fastagent.py +140 -1059
  9. mcp_agent/core/proxies.py +51 -11
  10. mcp_agent/core/validation.py +221 -0
  11. mcp_agent/human_input/handler.py +5 -2
  12. mcp_agent/mcp/mcp_aggregator.py +537 -47
  13. mcp_agent/mcp/mcp_connection_manager.py +13 -2
  14. mcp_agent/mcp_server/__init__.py +4 -0
  15. mcp_agent/mcp_server/agent_server.py +121 -0
  16. mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
  17. mcp_agent/resources/examples/internal/prompt_category.py +21 -0
  18. mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
  19. mcp_agent/resources/examples/internal/sizer.py +24 -0
  20. mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
  21. mcp_agent/resources/examples/workflows/sse.py +23 -0
  22. mcp_agent/ui/console_display.py +278 -0
  23. mcp_agent/workflows/llm/augmented_llm.py +245 -179
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
  26. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/WHEEL +0 -0
  27. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/entry_points.txt +0 -0
  28. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/licenses/LICENSE +0 -0
@@ -21,12 +21,13 @@ from mcp.client.stdio import (
21
21
  get_default_environment,
22
22
  )
23
23
  from mcp.client.sse import sse_client
24
- from mcp.types import JSONRPCMessage
24
+ from mcp.types import JSONRPCMessage, ServerCapabilities
25
25
 
26
26
  from mcp_agent.config import MCPServerSettings
27
27
  from mcp_agent.core.exceptions import ServerInitializationError
28
28
  from mcp_agent.event_progress import ProgressAction
29
29
  from mcp_agent.logging.logger import get_logger
30
+ from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession
30
31
  from mcp_agent.mcp.stdio import stdio_client_with_rich_stderr
31
32
  from mcp_agent.context_dependent import ContextDependent
32
33
 
@@ -107,8 +108,9 @@ class ServerConnection:
107
108
  Must be called within an async context.
108
109
  """
109
110
 
110
- await self.session.initialize()
111
+ result = await self.session.initialize()
111
112
 
113
+ self.server_capabilities = result.capabilities
112
114
  # If there's an init hook, run it
113
115
  if self._init_hook:
114
116
  logger.info(f"{self.server_name}: Executing init hook.")
@@ -333,6 +335,15 @@ class MCPConnectionManager(ContextDependent):
333
335
 
334
336
  return server_conn
335
337
 
338
+ async def get_server_capabilities(
339
+ self, server_name: str
340
+ ) -> ServerCapabilities | None:
341
+ """Get the capabilities of a specific server."""
342
+ server_conn = await self.get_server(
343
+ server_name, client_session_factory=MCPAgentClientSession
344
+ )
345
+ return server_conn.server_capabilities if server_conn else None
346
+
336
347
  async def disconnect_server(self, server_name: str) -> None:
337
348
  """
338
349
  Disconnect a specific server if it's running under this connection manager.
@@ -0,0 +1,4 @@
1
+ # Import and re-export AgentMCPServer to avoid circular imports
2
+ from mcp_agent.mcp_server.agent_server import AgentMCPServer
3
+
4
+ __all__ = ["AgentMCPServer"]
@@ -0,0 +1,121 @@
1
+ # src/mcp_agent/mcp_server/agent_server.py
2
+
3
+ from mcp.server.fastmcp import FastMCP
4
+
5
+ # Remove circular import
6
+ from mcp_agent.core.agent_app import AgentApp
7
+ from mcp.server.fastmcp import Context as MCPContext
8
+
9
+
10
+ class AgentMCPServer:
11
+ """Exposes FastAgent agents as MCP tools through an MCP server."""
12
+
13
+ def __init__(
14
+ self,
15
+ agent_app: AgentApp,
16
+ server_name: str = "FastAgent-MCP-Server",
17
+ server_description: str = None,
18
+ ):
19
+ self.agent_app = agent_app
20
+ self.mcp_server = FastMCP(
21
+ name=server_name,
22
+ instructions=server_description
23
+ or f"This server provides access to {len(agent_app.agents)} agents",
24
+ )
25
+ self.setup_tools()
26
+
27
+ def setup_tools(self):
28
+ """Register all agents as MCP tools."""
29
+ for agent_name, agent_proxy in self.agent_app._agents.items():
30
+ self.register_agent_tools(agent_name, agent_proxy)
31
+
32
+ def register_agent_tools(self, agent_name: str, agent_proxy):
33
+ """Register tools for a specific agent."""
34
+
35
+ # Basic send message tool
36
+ @self.mcp_server.tool(
37
+ name=f"{agent_name}.send",
38
+ description=f"Send a message to the {agent_name} agent",
39
+ )
40
+ async def send_message(message: str, ctx: MCPContext) -> str:
41
+ """Send a message to the agent and return its response."""
42
+
43
+ # Get the agent's context
44
+ agent_context = None
45
+ if hasattr(agent_proxy, "_agent") and hasattr(
46
+ agent_proxy._agent, "context"
47
+ ):
48
+ agent_context = agent_proxy._agent.context
49
+
50
+ # Define the function to execute
51
+ async def execute_send():
52
+ return await agent_proxy.send(message)
53
+
54
+ # Execute with bridged context
55
+ if agent_context and ctx:
56
+ return await self.with_bridged_context(agent_context, ctx, execute_send)
57
+ else:
58
+ return await execute_send()
59
+
60
+ def run(self, transport: str = "sse", host: str = "0.0.0.0", port: int = 8000):
61
+ """Run the MCP server."""
62
+ if transport == "sse":
63
+ # For running as a web server
64
+ self.mcp_server.settings.host = host
65
+ self.mcp_server.settings.port = port
66
+
67
+ self.mcp_server.run(transport=transport)
68
+
69
+ async def run_async(
70
+ self, transport: str = "sse", host: str = "0.0.0.0", port: int = 8000
71
+ ):
72
+ """Run the MCP server asynchronously."""
73
+ if transport == "sse":
74
+ self.mcp_server.settings.host = host
75
+ self.mcp_server.settings.port = port
76
+ await self.mcp_server.run_sse_async()
77
+ else: # stdio
78
+ await self.mcp_server.run_stdio_async()
79
+
80
+ async def with_bridged_context(
81
+ self, agent_context, mcp_context, func, *args, **kwargs
82
+ ):
83
+ """
84
+ Execute a function with bridged context between MCP and agent
85
+
86
+ Args:
87
+ agent_context: The agent's context object
88
+ mcp_context: The MCP context from the tool call
89
+ func: The function to execute
90
+ args, kwargs: Arguments to pass to the function
91
+ """
92
+ # Store original progress reporter if it exists
93
+ original_progress_reporter = None
94
+ if hasattr(agent_context, "progress_reporter"):
95
+ original_progress_reporter = agent_context.progress_reporter
96
+
97
+ # Store MCP context in agent context for nested calls
98
+ agent_context.mcp_context = mcp_context
99
+
100
+ # Create bridged progress reporter
101
+ async def bridged_progress(progress, total=None):
102
+ if mcp_context:
103
+ await mcp_context.report_progress(progress, total)
104
+ if original_progress_reporter:
105
+ await original_progress_reporter(progress, total)
106
+
107
+ # Install bridged progress reporter
108
+ if hasattr(agent_context, "progress_reporter"):
109
+ agent_context.progress_reporter = bridged_progress
110
+
111
+ try:
112
+ # Call the function
113
+ return await func(*args, **kwargs)
114
+ finally:
115
+ # Restore original progress reporter
116
+ if hasattr(agent_context, "progress_reporter"):
117
+ agent_context.progress_reporter = original_progress_reporter
118
+
119
+ # Remove MCP context reference
120
+ if hasattr(agent_context, "mcp_context"):
121
+ delattr(agent_context, "mcp_context")
@@ -0,0 +1,52 @@
1
+ default_model: sonnet
2
+
3
+ # on windows, adjust the mount point to be the full path e.g. x:/temp/data-analysis/mount-point:/mnt/data/
4
+
5
+ mcp:
6
+ servers:
7
+ interpreter:
8
+ command: "docker"
9
+ args:
10
+ [
11
+ "run",
12
+ "-i",
13
+ "--rm",
14
+ "--pull=always",
15
+ "-v",
16
+ "./mount-point:/mnt/data/",
17
+ "ghcr.io/evalstate/mcp-py-repl:latest",
18
+ ]
19
+ roots:
20
+ - uri: "file://./mount-point/"
21
+ name: "test_data"
22
+ server_uri_alias: "file:///mnt/data/"
23
+ filesystem:
24
+ # On windows update the command and arguments to use `node` and the absolute path to the server.
25
+ # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
26
+ # Use `npm -g root` to find the global node_modules path.`
27
+ # command: "node"
28
+ # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."]
29
+ command: "npx"
30
+ args:
31
+ [
32
+ "-y",
33
+ "@modelcontextprotocol/server-filesystem",
34
+ "src/mcp_agent/resources/examples/data-analysis/mount-point/",
35
+ ]
36
+ fetch:
37
+ command: "uvx"
38
+ args: ["mcp-server-fetch"]
39
+ brave:
40
+ # On windows replace the command and args line to use `node` and the absolute path to the server.
41
+ # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
42
+ # Use `npm -g root` to find the global node_modules path.`
43
+ # command: "node"
44
+ # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
45
+ command: "npx"
46
+ args: ["-y", "@modelcontextprotocol/server-brave-search"]
47
+ sizer:
48
+ command: "uv"
49
+ args: ["run", "prompt_sizing.py"]
50
+ category:
51
+ command: "uv"
52
+ args: ["run", "prompt_category.py"]
@@ -0,0 +1,21 @@
1
+ from mcp.server.fastmcp import FastMCP
2
+ from mcp.server.fastmcp.prompts.base import UserMessage, AssistantMessage
3
+
4
+ mcp = FastMCP("MCP Root Tester")
5
+
6
+
7
+ @mcp.prompt(name="category_prompt", description="set up the category protocol")
8
+ def category_prompt():
9
+ return [
10
+ UserMessage("Cat"),
11
+ AssistantMessage("animal"),
12
+ UserMessage("dog"),
13
+ AssistantMessage("animal"),
14
+ UserMessage("quartz"),
15
+ AssistantMessage("mineral"),
16
+ # UserMessage("the sun"),
17
+ ]
18
+
19
+
20
+ if __name__ == "__main__":
21
+ mcp.run()
@@ -0,0 +1,53 @@
1
+ from pydantic import Field
2
+ from mcp.server.fastmcp import FastMCP
3
+ from mcp.server.fastmcp.prompts.base import UserMessage, AssistantMessage
4
+
5
+ mcp = FastMCP("MCP Prompt Tester")
6
+
7
+
8
+ @mcp.prompt(name="sizing_prompt", description="set up the sizing protocol")
9
+ def sizing_prompt():
10
+ return [
11
+ UserMessage("What is the size of the moon?"),
12
+ AssistantMessage("OBJECT: MOON\nSIZE: 3,474.8\nUNITS: KM\nTYPE: MINERAL"),
13
+ UserMessage("What is the size of the Earth?"),
14
+ AssistantMessage("OBJECT: EARTH\nSIZE: 12,742\nUNITS: KM\nTYPE: MINERAL"),
15
+ UserMessage("A tiger"),
16
+ AssistantMessage("OBJECT: TIGER\nSIZE: 1.2\nUNITS: M\nTYPE: ANIMAL"),
17
+ UserMessage("Domestic Cat"),
18
+ ]
19
+
20
+
21
+ @mcp.prompt(
22
+ name="sizing_prompt_units",
23
+ description="set up the sizing protocol with metric or imperial units",
24
+ )
25
+ def sizing_prompt_units(
26
+ metric: bool = Field(
27
+ description="Set to True for Metric, False for Imperial", default=True
28
+ ),
29
+ ):
30
+ if metric:
31
+ return [
32
+ UserMessage("What is the size of the moon?"),
33
+ AssistantMessage("OBJECT: MOON\nSIZE: 3,474.8\nUNITS: KM\nTYPE: MINERAL"),
34
+ UserMessage("What is the size of the Earth?"),
35
+ AssistantMessage("OBJECT: EARTH\nSIZE: 12,742\nUNITS: KM\nTYPE: MINERAL"),
36
+ UserMessage("A tiger"),
37
+ AssistantMessage("OBJECT: TIGER\nSIZE: 1.2\nUNITS: M\nTYPE: ANIMAL"),
38
+ UserMessage("Domestic Cat"),
39
+ ]
40
+ else:
41
+ return [
42
+ UserMessage("What is the size of the moon?"),
43
+ AssistantMessage("OBJECT: MOON\nSIZE: 2,159.1\nUNITS: MI\nTYPE: MINERAL"),
44
+ UserMessage("What is the size of the Earth?"),
45
+ AssistantMessage("OBJECT: EARTH\nSIZE: 7,918\nUNITS: MI\nTYPE: MINERAL"),
46
+ UserMessage("A tiger"),
47
+ AssistantMessage("OBJECT: TIGER\nSIZE: 3.9\nUNITS: FT\nTYPE: ANIMAL"),
48
+ UserMessage("Domestic Cat"),
49
+ ]
50
+
51
+
52
+ if __name__ == "__main__":
53
+ mcp.run()
@@ -0,0 +1,24 @@
1
+ import asyncio
2
+ from mcp_agent.core.fastagent import FastAgent
3
+
4
+ fast = FastAgent("Sizer Prompt Test")
5
+
6
+
7
+ @fast.agent(
8
+ "sizer",
9
+ "given an object return its size",
10
+ servers=["sizer", "category"],
11
+ use_history=True,
12
+ )
13
+ async def main():
14
+ async with fast.run() as agent:
15
+ # await agent["sizer"].load_prompt("sizing_prompt_units", {"metric": "False"})
16
+ # print(await agent["sizer"].load_prompt("category-category_prompt"))
17
+ # await agent("What is the size of the moon?")
18
+ # await agent("What is the size of the Earth?")
19
+ # await agent("What is the size of the Sun?")
20
+ await agent()
21
+
22
+
23
+ if __name__ == "__main__":
24
+ asyncio.run(main())
@@ -6,7 +6,7 @@
6
6
 
7
7
  execution_engine: asyncio
8
8
  logger:
9
- type: file
9
+ type: console
10
10
  level: error
11
11
  truncate_tools: true
12
12
 
@@ -51,3 +51,16 @@ mcp:
51
51
  fetch:
52
52
  command: "uvx"
53
53
  args: ["mcp-server-fetch"]
54
+ sequential:
55
+ command: "npx"
56
+ args: ["-y","@modelcontextprotocol/server-sequential-thinking"]
57
+
58
+ # webmcp:
59
+ # command: "node"
60
+ # args: ["/home/ssmith/.webmcp/server.cjs"]
61
+ # env:
62
+ # WEBMCP_SERVER_TOKEN: 96e22896d8143fc1d61fec09208fc5ed
63
+
64
+
65
+
66
+
@@ -0,0 +1,23 @@
1
+ # example_mcp_server.py
2
+ import asyncio
3
+
4
+ from mcp_agent.core.fastagent import FastAgent
5
+
6
+ # Define your agents as normal
7
+ fa = FastAgent("My Application")
8
+
9
+
10
+ @fa.agent("analyst", "hello, world", servers=["fetch"])
11
+
12
+ # Run the application with MCP server
13
+ async def main():
14
+ await fa.run_with_mcp_server(
15
+ transport="sse", # Use "sse" for web server, "stdio" for command line
16
+ port=8000,
17
+ server_name="MyAgents",
18
+ server_description="MCP Server exposing analyst and researcher agents",
19
+ )
20
+
21
+
22
+ if __name__ == "__main__":
23
+ asyncio.run(main())
@@ -0,0 +1,278 @@
1
+ from typing import Optional, Union
2
+
3
+ from mcp.types import CallToolResult
4
+ from rich.panel import Panel
5
+ from rich.text import Text
6
+
7
+ from mcp_agent import console
8
+ from mcp_agent.mcp.mcp_aggregator import SEP
9
+
10
+ # Constants
11
+ HUMAN_INPUT_TOOL_NAME = "__human_input__"
12
+
13
+
14
+ class ConsoleDisplay:
15
+ """
16
+ Handles displaying formatted messages, tool calls, and results to the console.
17
+ This centralizes the UI display logic used by LLM implementations.
18
+ """
19
+
20
+ def __init__(self, config=None):
21
+ """
22
+ Initialize the console display handler.
23
+
24
+ Args:
25
+ config: Configuration object containing display preferences
26
+ """
27
+ self.config = config
28
+
29
+ def show_tool_result(self, result: CallToolResult):
30
+ """Display a tool result in a formatted panel."""
31
+ if not self.config or not self.config.logger.show_tools:
32
+ return
33
+
34
+ style = "red" if result.isError else "magenta"
35
+
36
+ panel = Panel(
37
+ Text(str(result.content), overflow="..."),
38
+ title="[TOOL RESULT]",
39
+ title_align="right",
40
+ style=style,
41
+ border_style="bold white",
42
+ padding=(1, 2),
43
+ )
44
+
45
+ if self.config and self.config.logger.truncate_tools:
46
+ if len(str(result.content)) > 360:
47
+ panel.height = 8
48
+
49
+ console.console.print(panel)
50
+ console.console.print("\n")
51
+
52
+ def show_oai_tool_result(self, result):
53
+ """Display an OpenAI tool result in a formatted panel."""
54
+ if not self.config or not self.config.logger.show_tools:
55
+ return
56
+
57
+ panel = Panel(
58
+ Text(str(result), overflow="..."),
59
+ title="[TOOL RESULT]",
60
+ title_align="right",
61
+ style="magenta",
62
+ border_style="bold white",
63
+ padding=(1, 2),
64
+ )
65
+
66
+ if self.config and self.config.logger.truncate_tools:
67
+ if len(str(result)) > 360:
68
+ panel.height = 8
69
+
70
+ console.console.print(panel)
71
+ console.console.print("\n")
72
+
73
+ def show_tool_call(self, available_tools, tool_name, tool_args):
74
+ """Display a tool call in a formatted panel."""
75
+ if not self.config or not self.config.logger.show_tools:
76
+ return
77
+
78
+ display_tool_list = self._format_tool_list(available_tools, tool_name)
79
+
80
+ panel = Panel(
81
+ Text(str(tool_args), overflow="ellipsis"),
82
+ title="[TOOL CALL]",
83
+ title_align="left",
84
+ style="magenta",
85
+ border_style="bold white",
86
+ subtitle=display_tool_list,
87
+ subtitle_align="left",
88
+ padding=(1, 2),
89
+ )
90
+
91
+ if self.config and self.config.logger.truncate_tools:
92
+ if len(str(tool_args)) > 360:
93
+ panel.height = 8
94
+
95
+ console.console.print(panel)
96
+ console.console.print("\n")
97
+
98
+ def _format_tool_list(self, available_tools, selected_tool_name):
99
+ """Format the list of available tools, highlighting the selected one."""
100
+ display_tool_list = Text()
101
+ for display_tool in available_tools:
102
+ # Handle both OpenAI and Anthropic tool formats
103
+ if isinstance(display_tool, dict):
104
+ if "function" in display_tool:
105
+ # OpenAI format
106
+ tool_call_name = display_tool["function"]["name"]
107
+ else:
108
+ # Anthropic format
109
+ tool_call_name = display_tool["name"]
110
+ else:
111
+ # Handle potential object format (e.g., Pydantic models)
112
+ tool_call_name = (
113
+ display_tool.function.name
114
+ if hasattr(display_tool, "function")
115
+ else display_tool.name
116
+ )
117
+
118
+ parts = (
119
+ tool_call_name.split(SEP)
120
+ if SEP in tool_call_name
121
+ else [tool_call_name, tool_call_name]
122
+ )
123
+
124
+ if selected_tool_name.split(SEP)[0] == parts[0]:
125
+ style = (
126
+ "magenta" if tool_call_name == selected_tool_name else "dim white"
127
+ )
128
+ shortened_name = (
129
+ parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
130
+ )
131
+ display_tool_list.append(f"[{shortened_name}] ", style)
132
+
133
+ return display_tool_list
134
+
135
+ async def show_assistant_message(
136
+ self,
137
+ message_text: Union[str, Text],
138
+ aggregator=None,
139
+ highlight_namespaced_tool: str = "",
140
+ title: str = "ASSISTANT",
141
+ name: Optional[str] = None,
142
+ ):
143
+ """Display an assistant message in a formatted panel."""
144
+ if not self.config or not self.config.logger.show_chat:
145
+ return
146
+
147
+ display_server_list = Text()
148
+
149
+ if aggregator:
150
+ # Add human input tool if available
151
+ tools = await aggregator.list_tools()
152
+ if any(tool.name == HUMAN_INPUT_TOOL_NAME for tool in tools.tools):
153
+ style = (
154
+ "green"
155
+ if highlight_namespaced_tool == HUMAN_INPUT_TOOL_NAME
156
+ else "dim white"
157
+ )
158
+ display_server_list.append("[human] ", style)
159
+
160
+ # Add all available servers
161
+ mcp_server_name = (
162
+ highlight_namespaced_tool.split(SEP)[0]
163
+ if SEP in highlight_namespaced_tool
164
+ else highlight_namespaced_tool
165
+ )
166
+
167
+ for server_name in await aggregator.list_servers():
168
+ style = "green" if server_name == mcp_server_name else "dim white"
169
+ display_server_list.append(f"[{server_name}] ", style)
170
+
171
+ panel = Panel(
172
+ message_text,
173
+ title=f"[{title}]{f' ({name})' if name else ''}",
174
+ title_align="left",
175
+ style="green",
176
+ border_style="bold white",
177
+ padding=(1, 2),
178
+ subtitle=display_server_list,
179
+ subtitle_align="left",
180
+ )
181
+ console.console.print(panel)
182
+ console.console.print("\n")
183
+
184
+ def show_user_message(
185
+ self, message, model: Optional[str], chat_turn: int, name: Optional[str] = None
186
+ ):
187
+ """Display a user message in a formatted panel."""
188
+ if not self.config or not self.config.logger.show_chat:
189
+ return
190
+
191
+ panel = Panel(
192
+ message,
193
+ title=f"{f'({name}) [USER]' if name else '[USER]'}",
194
+ title_align="right",
195
+ style="blue",
196
+ border_style="bold white",
197
+ padding=(1, 2),
198
+ subtitle=Text(f"{model or 'unknown'} turn {chat_turn}", style="dim white"),
199
+ subtitle_align="left",
200
+ )
201
+ console.console.print(panel)
202
+ console.console.print("\n")
203
+
204
+ async def show_prompt_loaded(
205
+ self,
206
+ prompt_name: str,
207
+ description: Optional[str] = None,
208
+ message_count: int = 0,
209
+ agent_name: Optional[str] = None,
210
+ aggregator=None,
211
+ arguments: Optional[dict[str, str]] = None,
212
+ ):
213
+ """
214
+ Display information about a loaded prompt template.
215
+
216
+ Args:
217
+ prompt_name: The name of the prompt that was loaded (should be namespaced)
218
+ description: Optional description of the prompt
219
+ message_count: Number of messages added to the conversation history
220
+ agent_name: Name of the agent using the prompt
221
+ aggregator: Optional aggregator instance to use for server highlighting
222
+ arguments: Optional dictionary of arguments passed to the prompt template
223
+ """
224
+ if not self.config or not self.config.logger.show_tools:
225
+ return
226
+
227
+ # Get server name from the namespaced prompt_name
228
+ mcp_server_name = None
229
+ if SEP in prompt_name:
230
+ # Extract the server from the namespaced prompt name
231
+ mcp_server_name = prompt_name.split(SEP)[0]
232
+ elif aggregator and aggregator.server_names:
233
+ # Fallback to first server if not namespaced
234
+ mcp_server_name = aggregator.server_names[0]
235
+
236
+ # Build the server list with highlighting
237
+ display_server_list = Text()
238
+ if aggregator:
239
+ for server_name in await aggregator.list_servers():
240
+ style = "green" if server_name == mcp_server_name else "dim white"
241
+ display_server_list.append(f"[{server_name}] ", style)
242
+
243
+ # Create content text
244
+ content = Text()
245
+ messages_phrase = (
246
+ f"Loaded {message_count} message{'s' if message_count != 1 else ''}"
247
+ )
248
+ content.append(f"{messages_phrase} from template ", style="cyan italic")
249
+ content.append(f"'{prompt_name}'", style="cyan bold italic")
250
+
251
+ if agent_name:
252
+ content.append(f" for {agent_name}", style="cyan italic")
253
+
254
+ # Add template arguments if provided
255
+ if arguments:
256
+ content.append("\n\nArguments:", style="cyan")
257
+ for key, value in arguments.items():
258
+ content.append(f"\n {key}: ", style="cyan bold")
259
+ content.append(value, style="white")
260
+
261
+ if description:
262
+ content.append("\n\n", style="default")
263
+ content.append(description, style="dim white")
264
+
265
+ # Create panel
266
+ panel = Panel(
267
+ content,
268
+ title="[PROMPT LOADED]",
269
+ title_align="right",
270
+ style="cyan",
271
+ border_style="bold white",
272
+ padding=(1, 2),
273
+ subtitle=display_server_list,
274
+ subtitle_align="left",
275
+ )
276
+
277
+ console.console.print(panel)
278
+ console.console.print("\n")