fast-agent-mcp 0.3.14__py3-none-any.whl → 0.3.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (49)
  1. fast_agent/__init__.py +2 -0
  2. fast_agent/agents/agent_types.py +5 -0
  3. fast_agent/agents/llm_agent.py +52 -4
  4. fast_agent/agents/llm_decorator.py +6 -0
  5. fast_agent/agents/mcp_agent.py +137 -13
  6. fast_agent/agents/tool_agent.py +33 -19
  7. fast_agent/agents/workflow/router_agent.py +2 -1
  8. fast_agent/cli/__main__.py +35 -0
  9. fast_agent/cli/commands/check_config.py +90 -2
  10. fast_agent/cli/commands/go.py +100 -36
  11. fast_agent/cli/constants.py +13 -1
  12. fast_agent/cli/main.py +1 -0
  13. fast_agent/config.py +41 -12
  14. fast_agent/constants.py +8 -0
  15. fast_agent/context.py +24 -15
  16. fast_agent/core/direct_decorators.py +9 -0
  17. fast_agent/core/fastagent.py +115 -2
  18. fast_agent/core/logging/listeners.py +8 -0
  19. fast_agent/core/validation.py +31 -33
  20. fast_agent/human_input/form_fields.py +4 -1
  21. fast_agent/interfaces.py +12 -1
  22. fast_agent/llm/fastagent_llm.py +76 -0
  23. fast_agent/llm/memory.py +26 -1
  24. fast_agent/llm/model_database.py +2 -2
  25. fast_agent/llm/model_factory.py +4 -1
  26. fast_agent/llm/provider/anthropic/llm_anthropic.py +112 -0
  27. fast_agent/llm/provider/openai/llm_openai.py +184 -18
  28. fast_agent/llm/provider/openai/responses.py +133 -0
  29. fast_agent/mcp/prompt_message_extended.py +2 -2
  30. fast_agent/resources/setup/agent.py +2 -0
  31. fast_agent/resources/setup/fastagent.config.yaml +11 -4
  32. fast_agent/skills/__init__.py +9 -0
  33. fast_agent/skills/registry.py +200 -0
  34. fast_agent/tools/shell_runtime.py +404 -0
  35. fast_agent/ui/console_display.py +925 -73
  36. fast_agent/ui/elicitation_form.py +98 -24
  37. fast_agent/ui/elicitation_style.py +2 -2
  38. fast_agent/ui/enhanced_prompt.py +128 -26
  39. fast_agent/ui/history_display.py +20 -5
  40. fast_agent/ui/interactive_prompt.py +108 -3
  41. fast_agent/ui/markdown_truncator.py +942 -0
  42. fast_agent/ui/mcp_display.py +2 -2
  43. fast_agent/ui/plain_text_truncator.py +68 -0
  44. fast_agent/ui/streaming_buffer.py +449 -0
  45. {fast_agent_mcp-0.3.14.dist-info → fast_agent_mcp-0.3.16.dist-info}/METADATA +9 -7
  46. {fast_agent_mcp-0.3.14.dist-info → fast_agent_mcp-0.3.16.dist-info}/RECORD +49 -42
  47. {fast_agent_mcp-0.3.14.dist-info → fast_agent_mcp-0.3.16.dist-info}/WHEEL +0 -0
  48. {fast_agent_mcp-0.3.14.dist-info → fast_agent_mcp-0.3.16.dist-info}/entry_points.txt +0 -0
  49. {fast_agent_mcp-0.3.14.dist-info → fast_agent_mcp-0.3.16.dist-info}/licenses/LICENSE +0 -0
fast_agent/__init__.py CHANGED
@@ -23,6 +23,7 @@ from fast_agent.config import (
     OpenRouterSettings,
     OpenTelemetrySettings,
     Settings,
+    SkillsSettings,
     TensorZeroSettings,
     XAISettings,
 )
@@ -126,6 +127,7 @@ __all__ = [
     "BedrockSettings",
     "HuggingFaceSettings",
     "LoggerSettings",
+    "SkillsSettings",
     # Progress and event tracking (lazy loaded)
     "ProgressAction",
     "ProgressEvent",
fast_agent/agents/agent_types.py CHANGED
@@ -4,10 +4,13 @@ Type definitions for agents and agent configurations.
 
 from dataclasses import dataclass, field
 from enum import StrEnum, auto
+from pathlib import Path
 from typing import Dict, List, Optional
 
 from mcp.client.session import ElicitationFnT
 
+from fast_agent.skills import SkillManifest, SkillRegistry
+
 # Forward imports to avoid circular dependencies
 from fast_agent.types import RequestParams
 
@@ -36,6 +39,8 @@ class AgentConfig:
     tools: Optional[Dict[str, List[str]]] = None
     resources: Optional[Dict[str, List[str]]] = None
     prompts: Optional[Dict[str, List[str]]] = None
+    skills: SkillManifest | SkillRegistry | Path | str | None = None
+    skill_manifests: List[SkillManifest] = field(default_factory=list, repr=False)
     model: str | None = None
     use_history: bool = True
     default_request_params: RequestParams | None = None
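The new `skills` field accepts several shapes: a parsed `SkillManifest`, a `SkillRegistry`, or a path to skills on disk. A minimal construction sketch, not part of the published diff; `name` and `instruction` are assumed field names that this hunk does not show:

from pathlib import Path

from fast_agent.agents.agent_types import AgentConfig

# Hypothetical usage; only `skills` and `skill_manifests` are confirmed above.
config = AgentConfig(
    name="assistant",
    instruction="You are a helpful agent.\n\n{{agentSkills}}",
    skills=Path(".fast-agent/skills"),  # may also be a SkillManifest, SkillRegistry, or str
)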
fast_agent/agents/llm_agent.py CHANGED
@@ -8,7 +8,7 @@ This class extends LlmDecorator with LLM-specific interaction behaviors includin
 - Chat display integration
 """
 
-from typing import List, Optional, Tuple
+from typing import Callable, List, Optional, Tuple
 
 try:
     from a2a.types import AgentCapabilities  # type: ignore
@@ -218,6 +218,13 @@ class LlmAgent(LlmDecorator):
         chat_turn = self._llm.chat_turn()
         self.display.show_user_message(message.last_text() or "", model, chat_turn, name=self.name)
 
+    def _should_stream(self) -> bool:
+        """Determine whether streaming display should be used."""
+        if getattr(self, "display", None):
+            enabled, _ = self.display.resolve_streaming_preferences()
+            return enabled
+        return True
+
     async def generate_impl(
         self,
         messages: List[PromptMessageExtended],
@@ -232,11 +239,52 @@ class LlmAgent(LlmDecorator):
             self.show_user_message(message=messages[-1])
 
         # TODO - manage error catch, recovery, pause
-        result, summary = await self._generate_with_summary(messages, request_params, tools)
+        summary_text: Text | None = None
 
-        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+        if self._should_stream():
+            display_name = self.name
+            display_model = self.llm.model_name if self._llm else None
+
+            remove_listener: Callable[[], None] | None = None
+            remove_tool_listener: Callable[[], None] | None = None
+
+            with self.display.streaming_assistant_message(
+                name=display_name,
+                model=display_model,
+            ) as stream_handle:
+                try:
+                    remove_listener = self.llm.add_stream_listener(stream_handle.update)
+                    remove_tool_listener = self.llm.add_tool_stream_listener(
+                        stream_handle.handle_tool_event
+                    )
+                except Exception:
+                    remove_listener = None
+                    remove_tool_listener = None
+
+                try:
+                    result, summary = await self._generate_with_summary(
+                        messages, request_params, tools
+                    )
+                finally:
+                    if remove_listener:
+                        remove_listener()
+                    if remove_tool_listener:
+                        remove_tool_listener()
+
+                if summary:
+                    summary_text = Text(f"\n\n{summary.message}", style="dim red italic")
+
+                stream_handle.finalize(result)
+
+            await self.show_assistant_message(result, additional_message=summary_text)
+        else:
+            result, summary = await self._generate_with_summary(messages, request_params, tools)
+
+            summary_text = (
+                Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+            )
+            await self.show_assistant_message(result, additional_message=summary_text)
 
-        await self.show_assistant_message(result, additional_message=summary_text)
         return result
 
     async def structured_impl(
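The streaming path above relies on `add_stream_listener` returning a remover callback, so cleanup in the `finally` block is a plain function call. A self-contained sketch of that remover-callback pattern, not part of the diff; the `FakeLLM` class is hypothetical and only the listener/remover shape is taken from the hunk:

from typing import Callable, List

class FakeLLM:
    def __init__(self) -> None:
        self._listeners: List[Callable[[str], None]] = []

    def add_stream_listener(self, listener: Callable[[str], None]) -> Callable[[], None]:
        # Register the listener and hand back a closure that detaches it.
        self._listeners.append(listener)

        def remove() -> None:
            if listener in self._listeners:
                self._listeners.remove(listener)

        return remove

    def emit(self, chunk: str) -> None:
        for listener in list(self._listeners):
            listener(chunk)

llm = FakeLLM()
remove = llm.add_stream_listener(print)
llm.emit("hello")    # prints "hello"
remove()
llm.emit("ignored")  # no listeners remain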
fast_agent/agents/llm_decorator.py CHANGED
@@ -718,6 +718,12 @@ class LlmDecorator(AgentProtocol):
             return self._llm.message_history
         return []
 
+    def pop_last_message(self) -> PromptMessageExtended | None:
+        """Remove and return the most recent message from the conversation history."""
+        if self._llm:
+            return self._llm.pop_last_message()
+        return None
+
     @property
     def usage_accumulator(self) -> UsageAccumulator | None:
         """
fast_agent/agents/mcp_agent.py CHANGED
@@ -42,12 +42,15 @@ from fast_agent.core.exceptions import PromptExitError
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.interfaces import FastAgentLLMProtocol
 from fast_agent.mcp.mcp_aggregator import MCPAggregator, ServerStatus
+from fast_agent.skills.registry import format_skills_for_prompt
 from fast_agent.tools.elicitation import (
     get_elicitation_tool,
     run_elicitation_form,
     set_elicitation_input_callback,
 )
+from fast_agent.tools.shell_runtime import ShellRuntime
 from fast_agent.types import PromptMessageExtended, RequestParams
+from fast_agent.ui import console
 
 # Define a TypeVar for models
 ModelT = TypeVar("ModelT", bound=BaseModel)
@@ -59,6 +62,7 @@ if TYPE_CHECKING:
 
     from fast_agent.context import Context
     from fast_agent.llm.usage_tracking import UsageAccumulator
+    from fast_agent.skills import SkillManifest
 
 
 class McpAgent(ABC, ToolAgent):
@@ -73,7 +77,6 @@ class McpAgent(ABC, ToolAgent):
         self,
         config: AgentConfig,
         connection_persistence: bool = True,
-        # legacy human_input_callback removed
         context: "Context | None" = None,
         **kwargs,
     ) -> None:
@@ -96,6 +99,69 @@ class McpAgent(ABC, ToolAgent):
         self.instruction = self.config.instruction
         self.executor = context.executor if context else None
         self.logger = get_logger(f"{__name__}.{self._name}")
+        manifests: List[SkillManifest] = list(getattr(self.config, "skill_manifests", []) or [])
+        if not manifests and context and getattr(context, "skill_registry", None):
+            try:
+                manifests = list(context.skill_registry.load_manifests())  # type: ignore[assignment]
+            except Exception:
+                manifests = []
+
+        self._skill_manifests = list(manifests)
+        self._skill_map: Dict[str, SkillManifest] = {
+            manifest.name: manifest for manifest in manifests
+        }
+        self._agent_skills_warning_shown = False
+        shell_flag_requested = bool(context and getattr(context, "shell_runtime", False))
+        skills_configured = bool(self._skill_manifests)
+        self._shell_runtime_activation_reason: str | None = None
+
+        if shell_flag_requested and skills_configured:
+            self._shell_runtime_activation_reason = (
+                "via --shell flag and agent skills configuration"
+            )
+        elif shell_flag_requested:
+            self._shell_runtime_activation_reason = "via --shell flag"
+        elif skills_configured:
+            self._shell_runtime_activation_reason = "because agent skills are configured"
+
+        # Get timeout configuration from context
+        timeout_seconds = 90  # default
+        warning_interval_seconds = 30  # default
+        if context and context.config:
+            shell_config = getattr(context.config, "shell_execution", None)
+            if shell_config:
+                timeout_seconds = getattr(shell_config, "timeout_seconds", 90)
+                warning_interval_seconds = getattr(shell_config, "warning_interval_seconds", 30)
+
+        # Derive skills directory from this agent's manifests (respects per-agent config)
+        skills_directory = None
+        if self._skill_manifests:
+            # Get the skills directory from the first manifest's path
+            # Path structure: .fast-agent/skills/skill-name/SKILL.md
+            # So we need parent.parent of the manifest path
+            first_manifest = self._skill_manifests[0]
+            if first_manifest.path:
+                skills_directory = first_manifest.path.parent.parent
+
+        self._shell_runtime = ShellRuntime(
+            self._shell_runtime_activation_reason,
+            self.logger,
+            timeout_seconds=timeout_seconds,
+            warning_interval_seconds=warning_interval_seconds,
+            skills_directory=skills_directory,
+        )
+        self._shell_runtime_enabled = self._shell_runtime.enabled
+        self._shell_access_modes: tuple[str, ...] = ()
+        if self._shell_runtime_enabled:
+            modes: list[str] = ["[red]direct[/red]"]
+            if skills_configured:
+                modes.append("skills")
+            if shell_flag_requested:
+                modes.append("command switch")
+            self._shell_access_modes = tuple(modes)
+        self._bash_tool = self._shell_runtime.tool
+        if self._shell_runtime_enabled:
+            self._shell_runtime.announce()
 
         # Store the default request params from config
         self._default_request_params = self.config.default_request_params
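Shell timeout settings fall back to built-in defaults (90 s timeout, 30 s warning interval) whenever `shell_execution` is absent from the config. A condensed, stand-alone sketch of that resolution logic, not part of the diff; the classes here are stand-ins:

class ShellExecutionConfig:
    timeout_seconds = 120
    warning_interval_seconds = 15

def resolve_timeouts(config: object | None) -> tuple[int, int]:
    # Defaults mirrored from the hunk above; getattr guards missing config layers.
    timeout, warn = 90, 30
    shell_config = getattr(config, "shell_execution", None) if config else None
    if shell_config:
        timeout = getattr(shell_config, "timeout_seconds", 90)
        warn = getattr(shell_config, "warning_interval_seconds", 30)
    return timeout, warn

class Cfg:
    shell_execution = ShellExecutionConfig()

print(resolve_timeouts(None))   # (90, 30)
print(resolve_timeouts(Cfg()))  # (120, 15)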
@@ -207,6 +273,24 @@
                 "{{serverInstructions}}", server_instructions
             )
 
+        skills_placeholder_present = "{{agentSkills}}" in self.instruction
+
+        if skills_placeholder_present:
+            agent_skills = format_skills_for_prompt(self._skill_manifests)
+            self.instruction = self.instruction.replace("{{agentSkills}}", agent_skills)
+            self._agent_skills_warning_shown = True
+        elif self._skill_manifests and not self._agent_skills_warning_shown:
+            warning_message = (
+                "Agent skills are configured but the system prompt does not include {{agentSkills}}. "
+                "Skill descriptions will not be added to the system prompt."
+            )
+            self.logger.warning(warning_message)
+            try:
+                console.console.print(f"[yellow]{warning_message}[/yellow]")
+            except Exception:  # pragma: no cover - console fallback
+                pass
+            self._agent_skills_warning_shown = True
+
         # Update default request params to match
         if self._default_request_params:
             self._default_request_params.systemPrompt = self.instruction
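The substitution only fires when the literal `{{agentSkills}}` placeholder appears in the system prompt; otherwise the agent warns that configured skills will not be advertised. A toy illustration, not part of the diff; the skill line stands in for `format_skills_for_prompt` output:

instruction = "You are a helpful agent.\n\nAvailable skills:\n{{agentSkills}}"
agent_skills = "- summarize: condense long documents"  # stand-in rendering

if "{{agentSkills}}" in instruction:
    instruction = instruction.replace("{{agentSkills}}", agent_skills)
else:
    print("warning: skills configured but {{agentSkills}} missing from prompt")

print(instruction)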
@@ -315,11 +399,12 @@
         """
         # Get all tools from the aggregator
         result = await self._aggregator.list_tools()
+        aggregator_tools = list(result.tools)
 
         # Apply filtering if tools are specified in config
         if self.config.tools is not None:
             filtered_tools = []
-            for tool in result.tools:
+            for tool in aggregator_tools:
                 # Extract server name from tool name, handling server names with hyphens
                 server_name = None
                 for configured_server in self.config.tools.keys():
@@ -334,7 +419,12 @@
                     if self._matches_pattern(tool.name, pattern, server_name):
                         filtered_tools.append(tool)
                         break
-            result.tools = filtered_tools
+            aggregator_tools = filtered_tools
+
+        result.tools = aggregator_tools
+
+        if self._bash_tool and all(tool.name != self._bash_tool.name for tool in result.tools):
+            result.tools.append(self._bash_tool)
 
         # Append human input tool if enabled and available
         if self.config.human_input and getattr(self, "_human_input_tool", None):
@@ -353,6 +443,9 @@
         Returns:
             Result of the tool call
         """
+        if self._shell_runtime.tool and name == self._shell_runtime.tool.name:
+            return await self._shell_runtime.execute(arguments)
+
         if name == HUMAN_INPUT_TOOL_NAME:
             # Call the elicitation-backed human input tool
             return await self._call_human_input_tool(arguments)
@@ -608,13 +701,17 @@
             return PromptMessageExtended(role="user", tool_results={})
 
         tool_results: dict[str, CallToolResult] = {}
-        self._tool_loop_error = None
+        tool_loop_error: str | None = None
 
         # Cache available tool names (original, not namespaced) for display
         available_tools = [
             namespaced_tool.tool.name
             for namespaced_tool in self._aggregator._namespaced_tool_map.values()
         ]
+        if self._shell_runtime.tool:
+            available_tools.append(self._shell_runtime.tool.name)
+
+        available_tools = list(dict.fromkeys(available_tools))
 
         # Process each tool call using our aggregator
         for correlation_id, tool_request in request.tool_calls.items():
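One detail worth noting in the hunk above: `list(dict.fromkeys(...))` deduplicates the tool-name list while preserving first-seen order, which a plain `set` would not guarantee:

names = ["bash", "search", "bash", "fetch"]
print(list(dict.fromkeys(names)))  # ['bash', 'search', 'fetch'] - order-preserving dedupe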
@@ -628,6 +725,8 @@
             tool_available = False
             if tool_name == HUMAN_INPUT_TOOL_NAME:
                 tool_available = True
+            elif self._bash_tool and tool_name == self._bash_tool.name:
+                tool_available = True
             elif namespaced_tool:
                 tool_available = True
             else:
@@ -639,7 +738,7 @@
             if not tool_available:
                 error_message = f"Tool '{display_tool_name}' is not available"
                 self.logger.error(error_message)
-                self._mark_tool_loop_error(
+                tool_loop_error = self._mark_tool_loop_error(
                     correlation_id=correlation_id,
                     error_message=error_message,
                     tool_results=tool_results,
@@ -654,6 +753,14 @@
                 # Tool not found in list, no highlighting
                 pass
 
+            metadata: dict[str, Any] | None = None
+            if (
+                self._shell_runtime_enabled
+                and self._shell_runtime.tool
+                and display_tool_name == self._shell_runtime.tool.name
+            ):
+                metadata = self._shell_runtime.metadata(tool_args.get("command"))
+
             self.display.show_tool_call(
                 name=self._name,
                 tool_args=tool_args,
@@ -661,10 +768,11 @@
                 tool_name=display_tool_name,
                 highlight_index=highlight_index,
                 max_item_length=12,
+                metadata=metadata,
             )
 
             try:
-                # Use our aggregator to call the MCP tool
+                # Use the appropriate handler for this tool
                 result = await self.call_tool(tool_name, tool_args)
                 tool_results[correlation_id] = result
 
@@ -675,12 +783,13 @@
                         namespaced_tool.server_name
                     )
 
-                self.display.show_tool_result(
-                    name=self._name,
-                    result=result,
-                    tool_name=display_tool_name,
-                    skybridge_config=skybridge_config,
-                )
+                if not getattr(result, "_suppress_display", False):
+                    self.display.show_tool_result(
+                        name=self._name,
+                        result=result,
+                        tool_name=display_tool_name,
+                        skybridge_config=skybridge_config,
+                    )
 
                 self.logger.debug(f"MCP tool {display_tool_name} executed successfully")
             except Exception as e:
@@ -694,7 +803,7 @@
                 # Show error result too (no need for skybridge config on errors)
                 self.display.show_tool_result(name=self._name, result=error_result)
 
-        return self._finalize_tool_results(tool_results)
+        return self._finalize_tool_results(tool_results, tool_loop_error=tool_loop_error)
 
     async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
         """
@@ -873,6 +982,9 @@
 
         result[special_server_name].append(self._human_input_tool)
 
+        # if self._skill_lookup_tool:
+        #     result.setdefault("__skills__", []).append(self._skill_lookup_tool)
+
         return result
 
     @property
@@ -985,6 +1097,18 @@
         Convert a Tool to an AgentSkill.
         """
 
+        if tool.name in self._skill_map:
+            manifest = self._skill_map[tool.name]
+            return AgentSkill(
+                id=f"skill:{manifest.name}",
+                name=manifest.name,
+                description=manifest.description or "",
+                tags=["skill"],
+                examples=None,
+                input_modes=None,
+                output_modes=None,
+            )
+
         _, tool_without_namespace = await self._parse_resource_name(tool.name, "tool")
         return AgentSkill(
             id=tool.name,
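The last hunk short-circuits tool-to-skill conversion when the tool name matches a configured skill manifest. A stand-alone mirror of that branch with stand-in dataclasses, not part of the diff (the real `AgentSkill` comes from `a2a.types`):

from dataclasses import dataclass

@dataclass
class SkillManifest:
    name: str
    description: str | None = None

@dataclass
class AgentSkill:
    id: str
    name: str
    description: str
    tags: list[str]

def to_agent_skill(manifest: SkillManifest) -> AgentSkill:
    # Mirrors the manifest branch above: "skill:" id prefix and a "skill" tag.
    return AgentSkill(
        id=f"skill:{manifest.name}",
        name=manifest.name,
        description=manifest.description or "",
        tags=["skill"],
    )

print(to_agent_skill(SkillManifest(name="summarize", description="Condense documents")))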
fast_agent/agents/tool_agent.py CHANGED
@@ -39,7 +39,6 @@ class ToolAgent(LlmAgent):
 
         self._execution_tools: dict[str, FastMCPTool] = {}
         self._tool_schemas: list[Tool] = []
-        self._tool_loop_error: str | None = None
 
         # Build a working list of tools and auto-inject human-input tool if missing
         working_tools: list[FastMCPTool | Callable] = list(tools) if tools else []
@@ -96,18 +95,23 @@
         )
 
         if LlmStopReason.TOOL_USE == result.stop_reason:
-            self._tool_loop_error = None
+            tool_message = await self.run_tools(result)
+            error_channel_messages = (tool_message.channels or {}).get(FAST_AGENT_ERROR_CHANNEL)
+            if error_channel_messages:
+                tool_result_contents = [
+                    content
+                    for tool_result in (tool_message.tool_results or {}).values()
+                    for content in tool_result.content
+                ]
+                if tool_result_contents:
+                    if result.content is None:
+                        result.content = []
+                    result.content.extend(tool_result_contents)
+                result.stop_reason = LlmStopReason.ERROR
+                break
             if self.config.use_history:
-                tool_message = await self.run_tools(result)
-                if self._tool_loop_error:
-                    result.stop_reason = LlmStopReason.ERROR
-                    break
                 messages = [tool_message]
             else:
-                tool_message = await self.run_tools(result)
-                if self._tool_loop_error:
-                    result.stop_reason = LlmStopReason.ERROR
-                    break
                 messages.extend([result, tool_message])
         else:
             break
@@ -131,7 +135,7 @@
             return PromptMessageExtended(role="user", tool_results={})
 
         tool_results: dict[str, CallToolResult] = {}
-        self._tool_loop_error = None
+        tool_loop_error: str | None = None
         # TODO -- use gather() for parallel results, update display
         available_tools = [t.name for t in (await self.list_tools()).tools]
         for correlation_id, tool_request in request.tool_calls.items():
@@ -141,7 +145,7 @@
             if tool_name not in self._execution_tools:
                 error_message = f"Tool '{tool_name}' is not available"
                 logger.error(error_message)
-                self._mark_tool_loop_error(
+                tool_loop_error = self._mark_tool_loop_error(
                     correlation_id=correlation_id,
                     error_message=error_message,
                     tool_results=tool_results,
@@ -170,7 +174,7 @@
             tool_results[correlation_id] = result
             self.display.show_tool_result(name=self.name, result=result, tool_name=tool_name)
 
-        return self._finalize_tool_results(tool_results)
+        return self._finalize_tool_results(tool_results, tool_loop_error=tool_loop_error)
 
     def _mark_tool_loop_error(
         self,
@@ -178,24 +182,34 @@
         correlation_id: str,
         error_message: str,
         tool_results: dict[str, CallToolResult],
-    ) -> None:
+    ) -> str:
         error_result = CallToolResult(
             content=[text_content(error_message)],
             isError=True,
         )
         tool_results[correlation_id] = error_result
         self.display.show_tool_result(name=self.name, result=error_result)
-        self._tool_loop_error = error_message
+        return error_message
 
     def _finalize_tool_results(
-        self, tool_results: dict[str, CallToolResult]
+        self,
+        tool_results: dict[str, CallToolResult],
+        *,
+        tool_loop_error: str | None = None,
     ) -> PromptMessageExtended:
         channels = None
-        if self._tool_loop_error:
+        content = []
+        if tool_loop_error:
+            content.append(text_content(tool_loop_error))
             channels = {
-                FAST_AGENT_ERROR_CHANNEL: [text_content(self._tool_loop_error)],
+                FAST_AGENT_ERROR_CHANNEL: [text_content(tool_loop_error)],
             }
-        return PromptMessageExtended(role="user", tool_results=tool_results, channels=channels)
+        return PromptMessageExtended(
+            role="user",
+            content=content,
+            tool_results=tool_results,
+            channels=channels,
+        )
 
     async def list_tools(self) -> ListToolsResult:
         """Return available tools for this agent. Overridable by subclasses."""
fast_agent/agents/workflow/router_agent.py CHANGED
@@ -213,7 +213,8 @@ class RouterAgent(LlmAgent):
         agent: LlmAgent = self.agent_map[route.agent]
 
         # Dispatch the request to the selected agent
-        return await agent.generate_impl(messages, request_params)
+        # discarded request_params: use llm defaults for subagents
+        return await agent.generate_impl(messages)
 
     async def structured_impl(
         self,
fast_agent/cli/__main__.py CHANGED
@@ -1,3 +1,5 @@
+import asyncio
+import json
 import sys
 
 from fast_agent.cli.constants import GO_SPECIFIC_OPTIONS, KNOWN_SUBCOMMANDS
@@ -8,6 +10,39 @@ from fast_agent.cli.main import app
 
 def main():
     """Main entry point that handles auto-routing to 'go' command."""
+    try:
+        loop = asyncio.get_event_loop()
+
+        def _log_asyncio_exception(loop: asyncio.AbstractEventLoop, context: dict) -> None:
+            import logging
+
+            logger = logging.getLogger("fast_agent.asyncio")
+
+            message = context.get("message", "(no message)")
+            task = context.get("task")
+            future = context.get("future")
+            handle = context.get("handle")
+            source_traceback = context.get("source_traceback")
+            exception = context.get("exception")
+
+            details = {
+                "message": message,
+                "task": repr(task) if task else None,
+                "future": repr(future) if future else None,
+                "handle": repr(handle) if handle else None,
+                "source_traceback": [str(frame) for frame in source_traceback] if source_traceback else None,
+            }
+
+            logger.error("Unhandled asyncio error: %s", message)
+            logger.error("Asyncio context: %s", json.dumps(details, indent=2))
+
+            if exception:
+                logger.exception("Asyncio exception", exc_info=exception)
+
+        loop.set_exception_handler(_log_asyncio_exception)
+    except RuntimeError:
+        # No running loop yet (rare for sync entry), safe to ignore
+        pass
     # Check if we should auto-route to 'go'
     if len(sys.argv) > 1:
         # Check if first arg is not already a subcommand
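For reference, `loop.set_exception_handler` hands the handler a context dict carrying the keys the code above reads (`message`, `task`, `future`, `handle`, `source_traceback`, `exception`). A minimal stand-alone demo, separate from the diff; exceptions raised in `call_soon` callbacks are routed to the installed handler:

import asyncio

def handler(loop: asyncio.AbstractEventLoop, context: dict) -> None:
    # Same fields the diff's handler inspects.
    print("caught:", context.get("message"), "|", context.get("exception"))

def faulty_callback() -> None:
    raise ValueError("boom")

async def main() -> None:
    loop = asyncio.get_running_loop()
    loop.set_exception_handler(handler)
    loop.call_soon(faulty_callback)  # exception in a callback goes to the handler
    await asyncio.sleep(0.1)

asyncio.run(main())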