fast-agent-mcp 0.0.11__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (30)
  1. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/METADATA +9 -1
  2. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/RECORD +30 -25
  3. mcp_agent/agents/agent.py +48 -8
  4. mcp_agent/cli/commands/bootstrap.py +2 -5
  5. mcp_agent/cli/commands/setup.py +1 -1
  6. mcp_agent/cli/main.py +6 -6
  7. mcp_agent/core/enhanced_prompt.py +358 -0
  8. mcp_agent/core/exceptions.py +17 -0
  9. mcp_agent/core/fastagent.py +108 -34
  10. mcp_agent/human_input/handler.py +43 -18
  11. mcp_agent/mcp/mcp_connection_manager.py +14 -12
  12. mcp_agent/resources/examples/internal/agent.py +17 -0
  13. mcp_agent/resources/examples/internal/job.py +1 -1
  14. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
  15. mcp_agent/resources/examples/researcher/fastagent.config.yaml +53 -0
  16. mcp_agent/resources/examples/researcher/researcher-eval.py +53 -0
  17. mcp_agent/resources/examples/workflows/chaining.py +5 -1
  18. mcp_agent/resources/examples/workflows/evaluator.py +7 -4
  19. mcp_agent/resources/examples/workflows/fastagent.config.yaml +24 -0
  20. mcp_agent/resources/examples/workflows/orchestrator.py +3 -2
  21. mcp_agent/resources/examples/workflows/parallel.py +2 -1
  22. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +31 -30
  23. mcp_agent/workflows/llm/augmented_llm.py +8 -2
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +3 -1
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +20 -9
  26. mcp_agent/workflows/llm/model_factory.py +7 -4
  27. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/WHEEL +0 -0
  28. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/entry_points.txt +0 -0
  29. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/licenses/LICENSE +0 -0
  30. /mcp_agent/resources/examples/{mcp_researcher → researcher}/researcher.py +0 -0

mcp_agent/human_input/handler.py
@@ -1,6 +1,5 @@
 import asyncio
 from rich.panel import Panel
-from rich.prompt import Prompt
 
 from mcp_agent.console import console
 from mcp_agent.human_input.types import (
@@ -8,10 +7,11 @@ from mcp_agent.human_input.types import (
     HumanInputResponse,
 )
 from mcp_agent.progress_display import progress_display
+from mcp_agent.core.enhanced_prompt import get_enhanced_input, handle_special_commands
 
 
 async def console_input_callback(request: HumanInputRequest) -> HumanInputResponse:
-    """Request input from a human user via console using rich panel and prompt."""
+    """Request input from a human user via console using prompt_toolkit."""
 
     # Prepare the prompt text
     prompt_text = request.prompt
@@ -28,26 +28,51 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputRespon
         padding=(1, 2),
     )
 
+    # Extract agent name from metadata dictionary
+    agent_name = (
+        request.metadata.get("agent_name", "Unknown Agent")
+        if request.metadata
+        else "Unknown Agent"
+    )
+
     # Use the context manager to pause the progress display while getting input
     with progress_display.paused():
         console.print(panel)
 
-        if request.timeout_seconds:
-            try:
-                loop = asyncio.get_event_loop()
-                response = await asyncio.wait_for(
-                    loop.run_in_executor(
-                        None, lambda: Prompt.ask("Provide your response ")
-                    ),
-                    request.timeout_seconds,
+        try:
+            if request.timeout_seconds:
+                try:
+                    # Use get_enhanced_input with empty agent list to disable agent switching
+                    response = await asyncio.wait_for(
+                        get_enhanced_input(
+                            agent_name=agent_name,
+                            available_agent_names=[],  # No agents for selection
+                            show_stop_hint=False,
+                            is_human_input=True,
+                            toolbar_color="ansimagenta",
+                        ),
+                        request.timeout_seconds,
+                    )
+                except asyncio.TimeoutError:
+                    console.print("\n[red]Timeout waiting for input[/red]")
+                    raise TimeoutError("No response received within timeout period")
+            else:
+                response = await get_enhanced_input(
+                    agent_name=agent_name,
+                    available_agent_names=[],  # No agents for selection
+                    show_stop_hint=False,
+                    is_human_input=True,
+                    toolbar_color="ansimagenta",
                 )
-            except asyncio.TimeoutError:
-                console.print("\n[red]Timeout waiting for input[/red]")
-                raise TimeoutError("No response received within timeout period")
-        else:
-            loop = asyncio.get_event_loop()
-            response = await loop.run_in_executor(
-                None, lambda: Prompt.ask("Provide your response ")
-            )
+
+            # if response and (response.startswith("/") or response.startswith("@")):
+            await handle_special_commands(response)
+
+        except KeyboardInterrupt:
+            console.print("\n[yellow]Input interrupted[/yellow]")
+            response = ""
+        except EOFError:
+            console.print("\n[yellow]Input terminated[/yellow]")
+            response = ""
 
     return HumanInputResponse(request_id=request.request_id, response=response.strip())
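
The new handler delegates to get_enhanced_input rather than rich's Prompt.ask. For reference, a minimal sketch of calling it directly, using only the arguments visible in the diff (the empty agent list disables agent switching, as in the human-input path above; the return value is assumed to be the entered string, since the handler calls .strip() on it):

    import asyncio

    from mcp_agent.core.enhanced_prompt import get_enhanced_input, handle_special_commands


    async def demo() -> None:
        # Mirrors the handler: resolves to the entered string once the user submits.
        response = await get_enhanced_input(
            agent_name="Unknown Agent",
            available_agent_names=[],  # no agents offered for selection
            show_stop_hint=False,
            is_human_input=True,
            toolbar_color="ansimagenta",
        )
        await handle_special_commands(response)
        print(response.strip())


    if __name__ == "__main__":
        asyncio.run(demo())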

mcp_agent/mcp/mcp_connection_manager.py
@@ -75,15 +75,15 @@ class ServerConnection:
 
         # Signal we want to shut down
         self._shutdown_event = Event()
-
+
         # Track error state
         self._error_occurred = False
         self._error_message = None
-
+
     def is_healthy(self) -> bool:
         """Check if the server connection is healthy and ready to use."""
         return self.session is not None and not self._error_occurred
-
+
     def reset_error_state(self) -> None:
         """Reset the error state, allowing reconnection attempts."""
         self._error_occurred = False
@@ -216,10 +216,10 @@ class MCPConnectionManager(ContextDependent):
         try:
             # First request all servers to shutdown
             await self.disconnect_all()
-
+
             # Add a small delay to allow for clean shutdown
             await asyncio.sleep(0.5)
-
+
             # Then close the task group if it's active
             if self._task_group_active:
                 await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
@@ -305,10 +305,12 @@ class MCPConnectionManager(ContextDependent):
         server_conn = self.running_servers.get(server_name)
         if server_conn and server_conn.is_healthy():
             return server_conn
-
+
         # If server exists but isn't healthy, remove it so we can create a new one
         if server_conn:
-            logger.info(f"{server_name}: Server exists but is unhealthy, recreating...")
+            logger.info(
+                f"{server_name}: Server exists but is unhealthy, recreating..."
+            )
             self.running_servers.pop(server_name)
             server_conn.request_shutdown()
 
@@ -326,9 +328,9 @@ class MCPConnectionManager(ContextDependent):
         if not server_conn.is_healthy():
             error_msg = server_conn._error_message or "Unknown error"
             raise ServerInitializationError(
-                f"{server_name}: Failed to initialize server: {error_msg}"
+                f"MCP Server: '{server_name}': Failed to initialize with error: '{error_msg}'. Check fastagent.config.yaml"
             )
-
+
         return server_conn
 
     async def disconnect_server(self, server_name: str) -> None:
@@ -353,16 +355,16 @@
         """Disconnect all servers that are running under this connection manager."""
         # Get a copy of servers to shutdown
         servers_to_shutdown = []
-
+
         async with self._lock:
             if not self.running_servers:
                 return
-
+
             # Make a copy of the servers to shut down
             servers_to_shutdown = list(self.running_servers.items())
             # Clear the dict immediately to prevent any new access
             self.running_servers.clear()
-
+
         # Release the lock before waiting for servers to shut down
         for name, conn in servers_to_shutdown:
             logger.info(f"{name}: Requesting shutdown...")

mcp_agent/resources/examples/internal/agent.py
@@ -0,0 +1,17 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("FastAgent Example")
+
+
+# Define the agent
+@fast.agent(servers=["fetch"])
+async def main():
+    # use the --model command line switch or agent arguments to change model
+    async with fast.run() as agent:
+        await agent()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

mcp_agent/resources/examples/internal/job.py
@@ -52,7 +52,7 @@ fast = FastAgent("PMO Job Description Generator")
 )
 @fast.evaluator_optimizer(
     name="job_description_writer",
-    optimizer="content_generator",
+    generator="content_generator",
     evaluator="consistency_checker",
     min_rating="EXCELLENT",
     max_refinements=2,

mcp_agent/resources/examples/mcp_researcher/researcher-eval.py
@@ -35,7 +35,7 @@ Summarize your evaluation as a structured response with:
 - Specific feedback and areas for improvement.""",
 )
 @agents.evaluator_optimizer(
-    optimizer="Researcher",
+    generator="Researcher",
     evaluator="Evaluator",
     max_refinements=5,
     min_rating="EXCELLENT",
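
Both example updates above reflect the same API change: the evaluator-optimizer decorator now takes generator= instead of optimizer=. A minimal sketch of the updated decorator shape, with hypothetical agent names and instructions standing in for the ones defined in each example:

    import asyncio

    from mcp_agent.core.fastagent import FastAgent

    fast = FastAgent("Rename Demo")


    @fast.agent(name="drafter", instruction="Draft a response to the request.")
    @fast.agent(name="reviewer", instruction="Rate the draft EXCELLENT, GOOD, FAIR or POOR.")
    @fast.evaluator_optimizer(
        name="writer",
        generator="drafter",  # keyword was `optimizer=` in 0.0.11
        evaluator="reviewer",
        min_rating="EXCELLENT",
        max_refinements=3,
    )
    async def main():
        async with fast.run() as agent:
            await agent.prompt("writer")


    if __name__ == "__main__":
        asyncio.run(main())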

mcp_agent/resources/examples/researcher/fastagent.config.yaml
@@ -0,0 +1,53 @@
+#
+# Please edit this configuration file to match your environment (on Windows).
+# Examples in comments below - check/change the paths.
+#
+#
+
+execution_engine: asyncio
+logger:
+  type: file
+  level: error
+  truncate_tools: true
+
+mcp:
+  servers:
+    brave:
+      # On windows replace the command and args line to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-brave-search"]
+      env:
+        # You can also place your BRAVE_API_KEY in the fastagent.secrets.yaml file.
+        BRAVE_API_KEY: <your_brave_api_key>
+    filesystem:
+      # On windows update the command and arguments to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","./agent_folder"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "./agent_folder/"]
+    interpreter:
+      command: "docker"
+      args: [
+        "run",
+        "-i",
+        "--rm",
+        "--pull=always",
+        "-v",
+        "./agent_folder:/mnt/data/",
+        # Docker needs the absolute path on Windows (e.g. "x:/fastagent/agent_folder:/mnt/data/")
+        # "./agent_folder:/mnt/data/",
+        "ghcr.io/evalstate/mcp-py-repl:latest",
+      ]
+      roots:
+        - uri: "file://./agent_folder/"
+          name: "agent_folder"
+          server_uri_alias: "file:///mnt/data/"
+    fetch:
+      command: "uvx"
+      args: ["mcp-server-fetch"]

mcp_agent/resources/examples/researcher/researcher-eval.py
@@ -0,0 +1,53 @@
+import asyncio
+
+from mcp_agent.core.fastagent import FastAgent
+
+agents = FastAgent(name="Researcher")
+
+
+@agents.agent(
+    name="Researcher",
+    instruction="""
+You are a research assistant, with access to internet search (via Brave),
+website fetch, a python interpreter (you can install packages with uv) and a filesystem.
+Use the current working directory to save and create files with both the Interpreter and Filesystem tools.
+The interpreter has numpy, pandas, matplotlib and seaborn already installed.
+
+You must always provide a summary of the specific sources you have used in your research.
+""",
+    servers=["brave", "interpreter", "filesystem", "fetch"],
+)
+@agents.agent(
+    name="Evaluator",
+    model="sonnet",
+    instruction="""
+Evaluate the response from the researcher based on the criteria:
+ - Sources cited. Has the researcher provided a summary of the specific sources used in the research?
+ - Validity. Has the researcher cross-checked and validated data and assumptions.
+ - Alignment. Has the researher acted and addressed feedback from any previous assessments?
+
+For each criterion:
+- Provide a rating (EXCELLENT, GOOD, FAIR, or POOR).
+- Offer specific feedback or suggestions for improvement.
+
+Summarize your evaluation as a structured response with:
+- Overall quality rating.
+- Specific feedback and areas for improvement.""",
+)
+@agents.evaluator_optimizer(
+    generator="Researcher",
+    evaluator="Evaluator",
+    max_refinements=5,
+    min_rating="EXCELLENT",
+    name="Researcher_Evaluator",
+)
+async def main():
+    async with agents.run() as agent:
+        await agent.prompt("Researcher_Evaluator")
+
+        print("Ask follow up quesions to the Researcher?")
+        await agent.prompt("Researcher", default="STOP")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

mcp_agent/resources/examples/workflows/chaining.py
@@ -23,7 +23,11 @@ async def main():
             await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
         )
 
-        # alternative syntax for above is agent["social_media"].send(message)
+
+        # uncomment below to interact with agents
+        # await agent()
+
+        # alternative syntax for above is agent["social_media"].send(message)
 
 
 if __name__ == "__main__":

mcp_agent/resources/examples/workflows/evaluator.py
@@ -9,9 +9,9 @@ from mcp_agent.core.fastagent import FastAgent
 fast = FastAgent("Evaluator-Optimizer")
 
 
-# Define optimizer agent
+# Define generator agent
 @fast.agent(
-    name="optimizer",
+    name="generator",
     instruction="""You are a career coach specializing in cover letter writing.
     You are tasked with generating a compelling cover letter given the job posting,
     candidate details, and company information. Tailor the response to the company and job requirements.
@@ -38,12 +38,13 @@ fast = FastAgent("Evaluator-Optimizer")
     Summarize your evaluation as a structured response with:
     - Overall quality rating.
     - Specific feedback and areas for improvement.""",
-    model="sonnet",
+    # instructor doesn't seem to work for sonnet37
+    # model="sonnet35",
 )
 # Define the evaluator-optimizer workflow
 @fast.evaluator_optimizer(
     name="cover_letter_writer",
-    optimizer="optimizer",  # Reference to optimizer agent
+    generator="generator",  # Reference to generator agent
     evaluator="evaluator",  # Reference to evaluator agent
     min_rating="EXCELLENT",  # Strive for excellence
     max_refinements=3,  # Maximum iterations
@@ -70,6 +71,8 @@ async def main():
         f"Company information: {company_information}",
     )
 
+    await agent()
+
 
 if __name__ == "__main__":
     asyncio.run(main())

mcp_agent/resources/examples/workflows/fastagent.config.yaml
@@ -0,0 +1,24 @@
+# Please edit this configuration file to match your environment (on Windows).
+# Examples in comments below - check/change the paths.
+#
+#
+
+execution_engine: asyncio
+logger:
+  type: file
+  level: error
+  truncate_tools: true
+
+mcp:
+  servers:
+    filesystem:
+      # On windows update the command and arguments to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "."]
+    fetch:
+      command: "uvx"
+      args: ["mcp-server-fetch"]
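
The entries keyed under mcp.servers are what agents name in their servers= list. A minimal sketch, assuming this file sits alongside the script as fastagent.config.yaml:

    import asyncio

    from mcp_agent.core.fastagent import FastAgent

    fast = FastAgent("Config Demo")


    # "filesystem" and "fetch" must match the keys under mcp.servers above.
    @fast.agent(servers=["filesystem", "fetch"])
    async def main():
        async with fast.run() as agent:
            await agent()  # interactive session, as in the bundled examples


    if __name__ == "__main__":
        asyncio.run(main())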

mcp_agent/resources/examples/workflows/orchestrator.py
@@ -45,10 +45,11 @@ fast = FastAgent("Orchestrator-Workers")
 @fast.orchestrator(
     name="orchestrate",
     agents=["finder", "writer", "proofreader"],
-    model="sonnet",
+    plan_type="iterative",
 )
 async def main():
     async with fast.run() as agent:
+
         await agent.author(
             "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
         )
@@ -65,7 +66,7 @@ async def main():
 
     # Send the task
     await agent.orchestrate(task)
-
+    await agent()
 
 if __name__ == "__main__":
     asyncio.run(main())
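
The orchestrator decorator now selects a planning style (plan_type="iterative") instead of pinning model="sonnet". A minimal sketch of the updated shape, with hypothetical worker-agent instructions standing in for the ones defined earlier in the example file:

    import asyncio

    from mcp_agent.core.fastagent import FastAgent

    fast = FastAgent("Orchestrator Demo")


    @fast.agent(name="finder", instruction="Locate the requested files.")
    @fast.agent(name="writer", instruction="Write the requested content.")
    @fast.agent(name="proofreader", instruction="Review writing for errors.")
    @fast.orchestrator(
        name="orchestrate",
        agents=["finder", "writer", "proofreader"],
        plan_type="iterative",  # replaces the model="sonnet" pin
    )
    async def main():
        async with fast.run() as agent:
            await agent.orchestrate("Review short_story.md and produce a graded report")


    if __name__ == "__main__":
        asyncio.run(main())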

mcp_agent/resources/examples/workflows/parallel.py
@@ -69,8 +69,9 @@ async def main():
     # Use the app's context manager
     async with fast.run() as agent:
         await agent.parallel(f"student short story submission: {SHORT_STORY}")
+
         # follow-on prompt to task agent
-        # await agent.style_enforcer.prompt(default="STOP")
+        await agent.style_enforcer.prompt(default_prompt="STOP")
 
 
 if __name__ == "__main__":

mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py
@@ -66,18 +66,19 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
 
     def __init__(
         self,
-        optimizer: Agent | AugmentedLLM,
+        generator: Agent | AugmentedLLM,
         evaluator: str | Agent | AugmentedLLM,
         min_rating: QualityRating = QualityRating.GOOD,
         max_refinements: int = 3,
-        llm_factory: Callable[[Agent], AugmentedLLM] | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
+        llm_factory: Callable[[Agent], AugmentedLLM]
+        | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
         context: Optional["Context"] = None,
     ):
         """
         Initialize the evaluator-optimizer workflow.
 
         Args:
-            optimizer: The agent/LLM/workflow that generates responses. Can be:
+            generator: The agent/LLM/workflow that generates responses. Can be:
                 - An Agent that will be converted to an AugmentedLLM
                 - An AugmentedLLM instance
                 - An Orchestrator/Router/ParallelLLM workflow
@@ -90,38 +91,38 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         super().__init__(context=context)
 
         # Set up the optimizer
-        self.name = optimizer.name
+        self.name = generator.name
         self.llm_factory = llm_factory
-        self.optimizer = optimizer
+        self.generator = generator
         self.evaluator = evaluator
 
         # TODO: Remove legacy - optimizer should always be an AugmentedLLM, no conversion needed
-        if isinstance(optimizer, Agent):
+        if isinstance(generator, Agent):
             if not llm_factory:
                 raise ValueError("llm_factory is required when using an Agent")
 
             # Only create new LLM if agent doesn't have one
-            if hasattr(optimizer, "_llm") and optimizer._llm:
-                self.optimizer_llm = optimizer._llm
+            if hasattr(generator, "_llm") and generator._llm:
+                self.generator_llm = generator._llm
             else:
-                self.optimizer_llm = llm_factory(agent=optimizer)
-
-            self.aggregator = optimizer
+                self.generator_llm = llm_factory(agent=generator)
+
+            self.aggregator = generator
             self.instruction = (
-                optimizer.instruction
-                if isinstance(optimizer.instruction, str)
+                generator.instruction
+                if isinstance(generator.instruction, str)
                 else None
             )
 
-        elif isinstance(optimizer, AugmentedLLM):
-            self.optimizer_llm = optimizer
-            self.aggregator = optimizer.aggregator
-            self.instruction = optimizer.instruction
+        elif isinstance(generator, AugmentedLLM):
+            self.generator_llm = generator
+            self.aggregator = generator.aggregator
+            self.instruction = generator.instruction
 
         else:
-            raise ValueError(f"Unsupported optimizer type: {type(optimizer)}")
+            raise ValueError(f"Unsupported optimizer type: {type(generator)}")
 
-        self.history = self.optimizer_llm.history
+        self.history = self.generator_llm.history
 
         # Set up the evaluator
         if isinstance(evaluator, AugmentedLLM):
@@ -169,17 +170,17 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         best_response = None
         best_rating = QualityRating.POOR
         self.refinement_history = []
-
+
         # Use a single AsyncExitStack for the entire method to maintain connections
         async with contextlib.AsyncExitStack() as stack:
             # Enter all agent contexts once at the beginning
-            if isinstance(self.optimizer, Agent):
-                await stack.enter_async_context(self.optimizer)
+            if isinstance(self.generator, Agent):
+                await stack.enter_async_context(self.generator)
             if isinstance(self.evaluator, Agent):
                 await stack.enter_async_context(self.evaluator)
-
+
             # Initial generation
-            response = await self.optimizer_llm.generate(
+            response = await self.generator_llm.generate(
                 message=message,
                 request_params=request_params,
             )
@@ -251,7 +252,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             )
 
             # No nested AsyncExitStack here either
-            response = await self.optimizer_llm.generate(
+            response = await self.generator_llm.generate(
                 message=refinement_prompt,
                 request_params=request_params,
            )
@@ -274,13 +275,13 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         # Handle case where response is a single message
         if not isinstance(response, list):
             return str(response)
-
+
         # Convert all messages to strings, handling different message types
         result_strings = []
         for r in response:
-            if hasattr(r, 'text'):
+            if hasattr(r, "text"):
                 result_strings.append(r.text)
-            elif hasattr(r, 'content'):
+            elif hasattr(r, "content"):
                 # Handle ToolUseBlock and similar
                 if isinstance(r.content, list):
                     # Typically content is a list of blocks
@@ -290,7 +291,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
                 else:
                     # Fallback to string representation
                     result_strings.append(str(r))
-
+
         return "\n".join(result_strings)
 
     async def generate_structured(
@@ -304,7 +305,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             message=message, request_params=request_params
         )
 
-        return await self.optimizer.generate_structured(
+        return await self.generator.generate_structured(
             message=response_str,
             response_model=response_model,
             request_params=request_params,
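
For code that constructs the workflow directly rather than via the decorator, the rename applies to the constructor too. A minimal sketch, under the assumptions that Agent lives at mcp_agent.agents.agent (per the files-changed list) and that QualityRating is importable from the workflow module where the diff shows it used:

    from typing import Callable

    from mcp_agent.agents.agent import Agent
    from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
    from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
        EvaluatorOptimizerLLM,
        QualityRating,
    )


    def build_workflow(
        generator_agent: Agent,
        evaluator_agent: Agent,
        llm_factory: Callable[[Agent], AugmentedLLM],
    ) -> EvaluatorOptimizerLLM:
        # First keyword is now `generator` (formerly `optimizer`);
        # llm_factory is still required whenever an Agent is passed.
        return EvaluatorOptimizerLLM(
            generator=generator_agent,
            evaluator=evaluator_agent,
            min_rating=QualityRating.GOOD,
            max_refinements=3,
            llm_factory=llm_factory,
        )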

mcp_agent/workflows/llm/augmented_llm.py
@@ -15,6 +15,7 @@ from mcp.types (
 )
 
 from mcp_agent.context_dependent import ContextDependent
+from mcp_agent.core.exceptions import PromptExitError
 from mcp_agent.event_progress import ProgressAction
 from mcp_agent.mcp.mcp_aggregator import MCPAggregator, SEP
 from mcp_agent.workflows.llm.llm_selector import ModelSelector
@@ -495,7 +496,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         console.console.print("\n")
 
     async def show_assistant_message(
-        self, message_text: str | Text, highlight_namespaced_tool: str = ""
+        self,
+        message_text: str | Text,
+        highlight_namespaced_tool: str = "",
+        title: str = "ASSISTANT",
     ):
         """Display an assistant message in a formatted panel."""
 
@@ -525,7 +529,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
 
         panel = Panel(
             message_text,
-            title=f"[ASSISTANT]{f' ({self.name})' if self.name else ''}",
+            title=f"[{title}]{f' ({self.name})' if self.name else ''}",
             title_align="left",
             style="green",
             border_style="bold white",
@@ -605,6 +609,8 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
             result = postprocess
 
             return result
+        except PromptExitError:
+            raise
        except Exception as e:
             return CallToolResult(
                 isError=True,
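
The new title keyword lets callers relabel the assistant panel. A minimal sketch, assuming llm is any existing AugmentedLLM instance:

    from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM


    async def show_structured(llm: AugmentedLLM, text: str) -> None:
        # title defaults to "ASSISTANT"; the Anthropic structured-output
        # path below passes "ASSISTANT/STRUCTURED".
        await llm.show_assistant_message(text, title="ASSISTANT/STRUCTURED")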

mcp_agent/workflows/llm/augmented_llm_anthropic.py
@@ -331,7 +331,9 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             messages=[{"role": "user", "content": response}],
             max_tokens=params.maxTokens,
         )
-
+        await self.show_assistant_message(
+            str(structured_response), title="ASSISTANT/STRUCTURED"
+        )
         return structured_response
 
     @classmethod