fast-agent-mcp 0.3.13__py3-none-any.whl → 0.3.15__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.

This release of fast-agent-mcp has been flagged as potentially problematic.

Files changed (44)
  1. fast_agent/agents/llm_agent.py +59 -37
  2. fast_agent/agents/llm_decorator.py +13 -2
  3. fast_agent/agents/mcp_agent.py +21 -5
  4. fast_agent/agents/tool_agent.py +41 -29
  5. fast_agent/agents/workflow/router_agent.py +2 -1
  6. fast_agent/cli/commands/check_config.py +48 -1
  7. fast_agent/config.py +65 -2
  8. fast_agent/constants.py +3 -0
  9. fast_agent/context.py +42 -9
  10. fast_agent/core/fastagent.py +14 -1
  11. fast_agent/core/logging/listeners.py +1 -1
  12. fast_agent/core/validation.py +31 -33
  13. fast_agent/event_progress.py +2 -3
  14. fast_agent/human_input/form_fields.py +4 -1
  15. fast_agent/interfaces.py +12 -2
  16. fast_agent/llm/fastagent_llm.py +31 -0
  17. fast_agent/llm/model_database.py +2 -2
  18. fast_agent/llm/model_factory.py +8 -1
  19. fast_agent/llm/provider_key_manager.py +1 -0
  20. fast_agent/llm/provider_types.py +1 -0
  21. fast_agent/llm/request_params.py +3 -1
  22. fast_agent/mcp/mcp_aggregator.py +313 -40
  23. fast_agent/mcp/mcp_connection_manager.py +39 -9
  24. fast_agent/mcp/prompt_message_extended.py +2 -2
  25. fast_agent/mcp/skybridge.py +45 -0
  26. fast_agent/mcp/sse_tracking.py +287 -0
  27. fast_agent/mcp/transport_tracking.py +37 -3
  28. fast_agent/mcp/types.py +24 -0
  29. fast_agent/resources/examples/workflows/router.py +1 -0
  30. fast_agent/resources/setup/fastagent.config.yaml +7 -1
  31. fast_agent/ui/console_display.py +946 -84
  32. fast_agent/ui/elicitation_form.py +23 -1
  33. fast_agent/ui/enhanced_prompt.py +153 -58
  34. fast_agent/ui/interactive_prompt.py +57 -34
  35. fast_agent/ui/markdown_truncator.py +942 -0
  36. fast_agent/ui/mcp_display.py +110 -29
  37. fast_agent/ui/plain_text_truncator.py +68 -0
  38. fast_agent/ui/rich_progress.py +4 -1
  39. fast_agent/ui/streaming_buffer.py +449 -0
  40. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/METADATA +4 -3
  41. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/RECORD +44 -38
  42. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/WHEEL +0 -0
  43. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/entry_points.txt +0 -0
  44. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/licenses/LICENSE +0 -0
fast_agent/agents/llm_agent.py CHANGED
@@ -8,7 +8,7 @@ This class extends LlmDecorator with LLM-specific interaction behaviors including
 - Chat display integration
 """
 
-from typing import List, Optional, Tuple
+from typing import Callable, List, Optional, Tuple
 
 try:
     from a2a.types import AgentCapabilities  # type: ignore
@@ -58,7 +58,16 @@ class LlmAgent(LlmDecorator):
         super().__init__(config=config, context=context)
 
         # Initialize display component
-        self.display = ConsoleDisplay(config=self._context.config if self._context else None)
+        self._display = ConsoleDisplay(config=self._context.config if self._context else None)
+
+    @property
+    def display(self) -> ConsoleDisplay:
+        """UI display helper for presenting messages and tool activity."""
+        return self._display
+
+    @display.setter
+    def display(self, value: ConsoleDisplay) -> None:
+        self._display = value
 
     async def show_assistant_message(
         self,
@@ -139,7 +148,10 @@ class LlmAgent(LlmDecorator):
             else:
                 # Fallback if we couldn't extract text
                 additional_segments.append(
-                    Text(f"\n\nError details: {str(error_blocks[0])}", style="dim red italic")
+                    Text(
+                        f"\n\nError details: {str(error_blocks[0])}",
+                        style="dim red italic",
+                    )
                 )
         else:
             # Fallback if no detailed error is available
@@ -206,6 +218,13 @@ class LlmAgent(LlmDecorator):
         chat_turn = self._llm.chat_turn()
         self.display.show_user_message(message.last_text() or "", model, chat_turn, name=self.name)
 
+    def _should_stream(self) -> bool:
+        """Determine whether streaming display should be used."""
+        if getattr(self, "display", None):
+            enabled, _ = self.display.resolve_streaming_preferences()
+            return enabled
+        return True
+
     async def generate_impl(
         self,
         messages: List[PromptMessageExtended],
@@ -219,13 +238,46 @@ class LlmAgent(LlmDecorator):
         if "user" == messages[-1].role:
             self.show_user_message(message=messages[-1])
 
-        # TODO -- we should merge the request parameters here with the LLM defaults?
         # TODO - manage error catch, recovery, pause
-        result, summary = await self._generate_with_summary(messages, request_params, tools)
+        summary_text: Text | None = None
 
-        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+        if self._should_stream():
+            display_name = self.name
+            display_model = self.llm.model_name if self._llm else None
+
+            remove_listener: Callable[[], None] | None = None
+
+            with self.display.streaming_assistant_message(
+                name=display_name,
+                model=display_model,
+            ) as stream_handle:
+                try:
+                    remove_listener = self.llm.add_stream_listener(stream_handle.update)
+                except Exception:
+                    remove_listener = None
+
+                try:
+                    result, summary = await self._generate_with_summary(
+                        messages, request_params, tools
+                    )
+                finally:
+                    if remove_listener:
+                        remove_listener()
+
+                if summary:
+                    summary_text = Text(f"\n\n{summary.message}", style="dim red italic")
+
+                stream_handle.finalize(result)
+
+            await self.show_assistant_message(result, additional_message=summary_text)
+        else:
+            result, summary = await self._generate_with_summary(messages, request_params, tools)
+
+            summary_text = (
+                Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+            )
+            await self.show_assistant_message(result, additional_message=summary_text)
 
-        await self.show_assistant_message(result, additional_message=summary_text)
         return result
 
     async def structured_impl(
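
The streaming path above relies on `add_stream_listener` handing back a detach callable, which the `finally` block always invokes. A minimal sketch of that contract from the caller's side (the `agent`, `messages`, and `on_chunk` names here are hypothetical, not part of the diff):

```python
# Hypothetical listener sketch: per the diff, add_stream_listener registers a
# callback for incremental output and returns a callable that detaches it.
def on_chunk(text: str) -> None:
    print(text, end="", flush=True)  # render each chunk as it arrives

remove_listener = agent.llm.add_stream_listener(on_chunk)
try:
    result = await agent.generate_impl(messages)  # chunks flow to on_chunk
finally:
    remove_listener()  # always detach, mirroring the try/finally above
```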
@@ -243,33 +295,3 @@ class LlmAgent(LlmDecorator):
         summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
         await self.show_assistant_message(message=message, additional_message=summary_text)
         return result, message
-
-    # async def show_prompt_loaded(
-    #     self,
-    #     prompt_name: str,
-    #     description: Optional[str] = None,
-    #     message_count: int = 0,
-    #     arguments: Optional[dict[str, str]] = None,
-    # ) -> None:
-    #     """
-    #     Display information about a loaded prompt template.
-
-    #     Args:
-    #         prompt_name: The name of the prompt
-    #         description: Optional description of the prompt
-    #         message_count: Number of messages in the prompt
-    #         arguments: Optional dictionary of arguments passed to the prompt
-    #     """
-    #     # Get aggregator from attached LLM if available
-    #     aggregator = None
-    #     if self._llm and hasattr(self._llm, "aggregator"):
-    #         aggregator = self._llm.aggregator
-
-    #     await self.display.show_prompt_loaded(
-    #         prompt_name=prompt_name,
-    #         description=description,
-    #         message_count=message_count,
-    #         agent_name=self.name,
-    #         aggregator=aggregator,
-    #         arguments=arguments,
-    #     )
fast_agent/agents/llm_decorator.py CHANGED
@@ -110,6 +110,11 @@ class LlmDecorator(AgentProtocol):
         self._llm: Optional[FastAgentLLMProtocol] = None
         self._initialized = False
 
+    @property
+    def context(self) -> Context | None:
+        """Optional execution context supplied at construction time."""
+        return self._context
+
     @property
     def initialized(self) -> bool:
         """Check if the agent is initialized."""
@@ -243,9 +248,12 @@ class LlmDecorator(AgentProtocol):
         """
         # Normalize all input types to a list of PromptMessageExtended
         multipart_messages = normalize_to_extended_list(messages)
+        final_request_params = (
+            self.llm.get_request_params(request_params) if self._llm else request_params
+        )
 
         with self._tracer.start_as_current_span(f"Agent: '{self._name}' generate"):
-            return await self.generate_impl(multipart_messages, request_params, tools)
+            return await self.generate_impl(multipart_messages, final_request_params, tools)
 
     async def generate_impl(
         self,
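
Both `generate` and `structured` (next hunk) now resolve parameters the same way before entering the traced span. The merge semantics are not spelled out in this diff; a plausible reading, shown as a sketch:

```python
# Assumed behavior of get_request_params (inferred from usage, not confirmed):
# explicit per-call values win, and the LLM's configured defaults fill the rest.
defaults = agent.llm.get_request_params(None)        # the defaults, unchanged
merged = agent.llm.get_request_params(call_params)   # call_params over defaults
```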
@@ -352,9 +360,12 @@ class LlmDecorator(AgentProtocol):
         """
         # Normalize all input types to a list of PromptMessageExtended
         multipart_messages = normalize_to_extended_list(messages)
+        final_request_params = (
+            self.llm.get_request_params(request_params) if self._llm else request_params
+        )
 
         with self._tracer.start_as_current_span(f"Agent: '{self._name}' structured"):
-            return await self.structured_impl(multipart_messages, model, request_params)
+            return await self.structured_impl(multipart_messages, model, final_request_params)
 
     async def structured_impl(
         self,
fast_agent/agents/mcp_agent.py CHANGED
@@ -169,6 +169,11 @@ class McpAgent(ABC, ToolAgent):
             return {}
         return await self._aggregator.collect_server_status()
 
+    @property
+    def aggregator(self) -> MCPAggregator:
+        """Expose the MCP aggregator for UI integrations."""
+        return self._aggregator
+
     @property
     def initialized(self) -> bool:
         """Check if both the agent and aggregator are initialized."""
@@ -603,7 +608,7 @@ class McpAgent(ABC, ToolAgent):
             return PromptMessageExtended(role="user", tool_results={})
 
         tool_results: dict[str, CallToolResult] = {}
-        self._tool_loop_error = None
+        tool_loop_error: str | None = None
 
         # Cache available tool names (original, not namespaced) for display
         available_tools = [
@@ -634,7 +639,7 @@ class McpAgent(ABC, ToolAgent):
             if not tool_available:
                 error_message = f"Tool '{display_tool_name}' is not available"
                 self.logger.error(error_message)
-                self._mark_tool_loop_error(
+                tool_loop_error = self._mark_tool_loop_error(
                     correlation_id=correlation_id,
                     error_message=error_message,
                     tool_results=tool_results,
@@ -664,7 +669,18 @@ class McpAgent(ABC, ToolAgent):
                 tool_results[correlation_id] = result
 
                 # Show tool result (like ToolAgent does)
-                self.display.show_tool_result(name=self._name, result=result)
+                skybridge_config = None
+                if namespaced_tool:
+                    skybridge_config = await self._aggregator.get_skybridge_config(
+                        namespaced_tool.server_name
+                    )
+
+                self.display.show_tool_result(
+                    name=self._name,
+                    result=result,
+                    tool_name=display_tool_name,
+                    skybridge_config=skybridge_config,
+                )
 
                 self.logger.debug(f"MCP tool {display_tool_name} executed successfully")
             except Exception as e:
@@ -675,10 +691,10 @@ class McpAgent(ABC, ToolAgent):
                 )
                 tool_results[correlation_id] = error_result
 
-                # Show error result too
+                # Show error result too (no need for skybridge config on errors)
                 self.display.show_tool_result(name=self._name, result=error_result)
 
-        return self._finalize_tool_results(tool_results)
+        return self._finalize_tool_results(tool_results, tool_loop_error=tool_loop_error)
 
     async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
         """
fast_agent/agents/tool_agent.py CHANGED
@@ -5,7 +5,11 @@ from mcp.types import CallToolResult, ListToolsResult, Tool
 
 from fast_agent.agents.agent_types import AgentConfig
 from fast_agent.agents.llm_agent import LlmAgent
-from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL, HUMAN_INPUT_TOOL_NAME
+from fast_agent.constants import (
+    DEFAULT_MAX_ITERATIONS,
+    FAST_AGENT_ERROR_CHANNEL,
+    HUMAN_INPUT_TOOL_NAME,
+)
 from fast_agent.context import Context
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.mcp.helpers.content_helpers import text_content
@@ -15,13 +19,6 @@ from fast_agent.types.llm_stop_reason import LlmStopReason
 
 logger = get_logger(__name__)
 
-DEFAULT_MAX_TOOL_CALLS = 20
-
-
-# should we have MAX_TOOL_CALLS instead to constrain by number of tools rather than turns...?
-DEFAULT_MAX_ITERATIONS = 20
-"""Maximum number of User/Assistant turns to take"""
-
 
 class ToolAgent(LlmAgent):
     """
@@ -42,7 +39,6 @@ class ToolAgent(LlmAgent):
 
         self._execution_tools: dict[str, FastMCPTool] = {}
         self._tool_schemas: list[Tool] = []
-        self._tool_loop_error: str | None = None
 
         # Build a working list of tools and auto-inject human-input tool if missing
         working_tools: list[FastMCPTool | Callable] = list(tools) if tools else []
@@ -91,6 +87,7 @@ class ToolAgent(LlmAgent):
         tools = (await self.list_tools()).tools
 
         iterations = 0
+        max_iterations = request_params.max_iterations if request_params else DEFAULT_MAX_ITERATIONS
 
         while True:
             result = await super().generate_impl(
@@ -98,24 +95,29 @@ class ToolAgent(LlmAgent):
             )
 
             if LlmStopReason.TOOL_USE == result.stop_reason:
-                self._tool_loop_error = None
+                tool_message = await self.run_tools(result)
+                error_channel_messages = (tool_message.channels or {}).get(FAST_AGENT_ERROR_CHANNEL)
+                if error_channel_messages:
+                    tool_result_contents = [
+                        content
+                        for tool_result in (tool_message.tool_results or {}).values()
+                        for content in tool_result.content
+                    ]
+                    if tool_result_contents:
+                        if result.content is None:
+                            result.content = []
+                        result.content.extend(tool_result_contents)
+                    result.stop_reason = LlmStopReason.ERROR
+                    break
                 if self.config.use_history:
-                    tool_message = await self.run_tools(result)
-                    if self._tool_loop_error:
-                        result.stop_reason = LlmStopReason.ERROR
-                        break
                     messages = [tool_message]
                 else:
-                    tool_message = await self.run_tools(result)
-                    if self._tool_loop_error:
-                        result.stop_reason = LlmStopReason.ERROR
-                        break
                     messages.extend([result, tool_message])
             else:
                 break
 
             iterations += 1
-            if iterations > DEFAULT_MAX_ITERATIONS:
+            if iterations > max_iterations:
                 logger.warning("Max iterations reached, stopping tool loop")
                 break
         return result
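
The loop cap is now taken from the request instead of a module-level constant. A hypothetical sketch, assuming `RequestParams` gained a `max_iterations` field (consistent with the read above and the +3/−1 change to fast_agent/llm/request_params.py):

```python
from fast_agent.llm.request_params import RequestParams

# Cap this call's tool loop at 5 iterations; omitting the params falls back to
# DEFAULT_MAX_ITERATIONS (20), now imported from fast_agent.constants.
params = RequestParams(max_iterations=5)
result = await agent.generate_impl(messages, params)
```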
@@ -133,7 +135,7 @@ class ToolAgent(LlmAgent):
             return PromptMessageExtended(role="user", tool_results={})
 
         tool_results: dict[str, CallToolResult] = {}
-        self._tool_loop_error = None
+        tool_loop_error: str | None = None
         # TODO -- use gather() for parallel results, update display
         available_tools = [t.name for t in (await self.list_tools()).tools]
         for correlation_id, tool_request in request.tool_calls.items():
@@ -143,7 +145,7 @@ class ToolAgent(LlmAgent):
             if tool_name not in self._execution_tools:
                 error_message = f"Tool '{tool_name}' is not available"
                 logger.error(error_message)
-                self._mark_tool_loop_error(
+                tool_loop_error = self._mark_tool_loop_error(
                     correlation_id=correlation_id,
                     error_message=error_message,
                     tool_results=tool_results,
@@ -170,9 +172,9 @@ class ToolAgent(LlmAgent):
             # Delegate to call_tool for execution (overridable by subclasses)
             result = await self.call_tool(tool_name, tool_args)
             tool_results[correlation_id] = result
-            self.display.show_tool_result(name=self.name, result=result)
+            self.display.show_tool_result(name=self.name, result=result, tool_name=tool_name)
 
-        return self._finalize_tool_results(tool_results)
+        return self._finalize_tool_results(tool_results, tool_loop_error=tool_loop_error)
 
     def _mark_tool_loop_error(
         self,
@@ -180,24 +182,34 @@ class ToolAgent(LlmAgent):
         correlation_id: str,
         error_message: str,
         tool_results: dict[str, CallToolResult],
-    ) -> None:
+    ) -> str:
         error_result = CallToolResult(
             content=[text_content(error_message)],
             isError=True,
         )
         tool_results[correlation_id] = error_result
         self.display.show_tool_result(name=self.name, result=error_result)
-        self._tool_loop_error = error_message
+        return error_message
 
     def _finalize_tool_results(
-        self, tool_results: dict[str, CallToolResult]
+        self,
+        tool_results: dict[str, CallToolResult],
+        *,
+        tool_loop_error: str | None = None,
     ) -> PromptMessageExtended:
         channels = None
-        if self._tool_loop_error:
+        content = []
+        if tool_loop_error:
+            content.append(text_content(tool_loop_error))
             channels = {
-                FAST_AGENT_ERROR_CHANNEL: [text_content(self._tool_loop_error)],
+                FAST_AGENT_ERROR_CHANNEL: [text_content(tool_loop_error)],
             }
-        return PromptMessageExtended(role="user", tool_results=tool_results, channels=channels)
+        return PromptMessageExtended(
+            role="user",
+            content=content,
+            tool_results=tool_results,
+            channels=channels,
+        )
 
     async def list_tools(self) -> ListToolsResult:
         """Return available tools for this agent. Overridable by subclasses."""
fast_agent/agents/workflow/router_agent.py CHANGED
@@ -213,7 +213,8 @@ class RouterAgent(LlmAgent):
         agent: LlmAgent = self.agent_map[route.agent]
 
         # Dispatch the request to the selected agent
-        return await agent.generate_impl(messages, request_params)
+        # discarded request_params: use llm defaults for subagents
+        return await agent.generate_impl(messages)
 
     async def structured_impl(
         self,
fast_agent/cli/commands/check_config.py CHANGED
@@ -144,7 +144,7 @@ def get_fastagent_version() -> str:
 
 def get_config_summary(config_path: Optional[Path]) -> dict:
     """Extract key information from the configuration file."""
-    from fast_agent.config import Settings
+    from fast_agent.config import MCPTimelineSettings, Settings
 
     # Get actual defaults from Settings class
     default_settings = Settings()
@@ -156,6 +156,7 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
         "logger": {
             "level": default_settings.logger.level,
             "type": default_settings.logger.type,
+            "streaming": default_settings.logger.streaming,
             "progress_display": default_settings.logger.progress_display,
             "show_chat": default_settings.logger.show_chat,
             "show_tools": default_settings.logger.show_tools,
@@ -163,6 +164,10 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
             "enable_markup": default_settings.logger.enable_markup,
         },
         "mcp_ui_mode": default_settings.mcp_ui_mode,
+        "timeline": {
+            "steps": default_settings.mcp_timeline.steps,
+            "step_seconds": default_settings.mcp_timeline.step_seconds,
+        },
         "mcp_servers": [],
     }
 
@@ -194,6 +199,7 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
         result["logger"] = {
             "level": logger_config.get("level", default_settings.logger.level),
             "type": logger_config.get("type", default_settings.logger.type),
+            "streaming": logger_config.get("streaming", default_settings.logger.streaming),
             "progress_display": logger_config.get(
                 "progress_display", default_settings.logger.progress_display
             ),
@@ -211,6 +217,21 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
         if "mcp_ui_mode" in config:
             result["mcp_ui_mode"] = config["mcp_ui_mode"]
 
+        # Get timeline settings
+        if "mcp_timeline" in config:
+            try:
+                timeline_override = MCPTimelineSettings(**(config.get("mcp_timeline") or {}))
+            except Exception as exc:  # pragma: no cover - defensive
+                console.print(
+                    "[yellow]Warning:[/yellow] Invalid mcp_timeline configuration; using defaults."
+                )
+                console.print(f"[yellow]Details:[/yellow] {exc}")
+            else:
+                result["timeline"] = {
+                    "steps": timeline_override.steps,
+                    "step_seconds": timeline_override.step_seconds,
+                }
+
         # Get MCP server info
         if "mcp" in config and "servers" in config["mcp"]:
             for server_name, server_config in config["mcp"]["servers"].items():
@@ -385,16 +406,42 @@ def show_check_summary() -> None:
     else:
         mcp_ui_display = f"[green]{mcp_ui_mode}[/green]"
 
+    timeline_settings = config_summary.get("timeline", {})
+    timeline_steps = timeline_settings.get("steps", 20)
+    timeline_step_seconds = timeline_settings.get("step_seconds", 30)
+
+    def format_step_interval(seconds: int) -> str:
+        try:
+            total = int(seconds)
+        except (TypeError, ValueError):
+            return str(seconds)
+        if total <= 0:
+            return "0s"
+        if total % 86400 == 0:
+            return f"{total // 86400}d"
+        if total % 3600 == 0:
+            return f"{total // 3600}h"
+        if total % 60 == 0:
+            return f"{total // 60}m"
+        minutes, secs = divmod(total, 60)
+        if minutes:
+            return f"{minutes}m{secs:02d}s"
+        return f"{secs}s"
+
     # Prepare all settings as pairs
     settings_data = [
         ("Log Level", logger.get("level", "warning (default)")),
         ("Log Type", logger.get("type", "file (default)")),
         ("MCP-UI", mcp_ui_display),
+        ("Streaming Mode", f"[green]{logger.get('streaming', 'markdown')}[/green]"),
+        ("Streaming Display", bool_to_symbol(logger.get("streaming_display", True))),
         ("Progress Display", bool_to_symbol(logger.get("progress_display", True))),
         ("Show Chat", bool_to_symbol(logger.get("show_chat", True))),
         ("Show Tools", bool_to_symbol(logger.get("show_tools", True))),
         ("Truncate Tools", bool_to_symbol(logger.get("truncate_tools", True))),
         ("Enable Markup", bool_to_symbol(logger.get("enable_markup", True))),
+        ("Timeline Steps", f"[green]{timeline_steps}[/green]"),
+        ("Timeline Interval", f"[green]{format_step_interval(timeline_step_seconds)}[/green]"),
     ]
 
     # Add rows in two-column layout, styling some values in green
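
`format_step_interval` prefers the largest unit that divides the interval evenly and only falls back to the mixed `XmYYs` form. Tracing the branches above by hand (derived values, not output captured from the package):

```python
assert format_step_interval(30) == "30s"      # divmod(30, 60) -> (0, 30)
assert format_step_interval(90) == "1m30s"    # mixed minutes and seconds
assert format_step_interval(120) == "2m"      # exact minutes
assert format_step_interval(3600) == "1h"     # exact hours
assert format_step_interval(86400) == "1d"    # exact days
assert format_step_interval("abc") == "abc"   # non-numeric input echoed back
```
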
fast_agent/config.py CHANGED
@@ -49,6 +49,66 @@ class MCPElicitationSettings(BaseModel):
     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
 
 
+class MCPTimelineSettings(BaseModel):
+    """Configuration for MCP activity timeline display."""
+
+    steps: int = 20
+    """Number of timeline buckets to render."""
+
+    step_seconds: int = 30
+    """Duration of each timeline bucket in seconds."""
+
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+
+    @staticmethod
+    def _parse_duration(value: str) -> int:
+        """Parse simple duration strings like '30s', '2m', '1h' into seconds."""
+        pattern = re.compile(r"^\s*(\d+)\s*([smhd]?)\s*$", re.IGNORECASE)
+        match = pattern.match(value)
+        if not match:
+            raise ValueError("Expected duration in seconds (e.g. 30, '45s', '2m').")
+        amount = int(match.group(1))
+        unit = match.group(2).lower()
+        multiplier = {
+            "": 1,
+            "s": 1,
+            "m": 60,
+            "h": 3600,
+            "d": 86400,
+        }.get(unit)
+        if multiplier is None:
+            raise ValueError("Duration unit must be one of s, m, h, or d.")
+        return amount * multiplier
+
+    @field_validator("steps", mode="before")
+    @classmethod
+    def _coerce_steps(cls, value: Any) -> int:
+        if isinstance(value, str):
+            if not value.strip().isdigit():
+                raise ValueError("Timeline steps must be a positive integer.")
+            value = int(value.strip())
+        elif isinstance(value, float):
+            value = int(value)
+        if not isinstance(value, int):
+            raise TypeError("Timeline steps must be an integer.")
+        if value <= 0:
+            raise ValueError("Timeline steps must be greater than zero.")
+        return value
+
+    @field_validator("step_seconds", mode="before")
+    @classmethod
+    def _coerce_step_seconds(cls, value: Any) -> int:
+        if isinstance(value, str):
+            value = cls._parse_duration(value)
+        elif isinstance(value, (int, float)):
+            value = int(value)
+        else:
+            raise TypeError("Timeline step duration must be a number of seconds.")
+        if value <= 0:
+            raise ValueError("Timeline step duration must be greater than zero.")
+        return value
+
+
 class MCPRootSettings(BaseModel):
     """Represents a root directory configuration for an MCP server."""
 
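
Because both validators run with `mode="before"`, the timeline settings accept friendly string forms as well as plain integers. A small sketch of the coercions defined above (values illustrative):

```python
from fast_agent.config import MCPTimelineSettings

settings = MCPTimelineSettings(steps="24", step_seconds="2m")
assert settings.steps == 24          # digit string accepted by _coerce_steps
assert settings.step_seconds == 120  # "2m" -> 120 seconds via _parse_duration

# Malformed or non-positive values fail fast at construction time:
# MCPTimelineSettings(step_seconds="soon")  -> ValueError
# MCPTimelineSettings(steps=0)              -> ValueError
```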
@@ -388,8 +448,8 @@ class LoggerSettings(BaseModel):
     """Truncate display of long tool calls"""
     enable_markup: bool = True
     """Enable markup in console output. Disable for outputs that may conflict with rich console formatting"""
-    use_legacy_display: bool = False
-    """Use the legacy console display instead of the new style display"""
+    streaming: Literal["markdown", "plain", "none"] = "markdown"
+    """Streaming renderer for assistant responses"""
 
 
 def find_fastagent_config_files(start_path: Path) -> Tuple[Optional[Path], Optional[Path]]:
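
The boolean `use_legacy_display` toggle gives way to a three-way renderer choice. A hypothetical programmatic sketch (the YAML config file is the usual route; the field name and allowed values come from the diff, the interpretation of each value is assumed from its name):

```python
from fast_agent.config import Settings

# "markdown" is the default renderer; "plain" would skip markdown rendering,
# "none" would disable the streaming display entirely.
settings = Settings(logger={"streaming": "plain"})
assert settings.logger.streaming == "plain"
```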
@@ -528,6 +588,9 @@ class Settings(BaseSettings):
     mcp_ui_output_dir: str = ".fast-agent/ui"
     """Directory where MCP-UI HTML files are written. Relative paths are resolved from CWD."""
 
+    mcp_timeline: MCPTimelineSettings = MCPTimelineSettings()
+    """Display settings for MCP activity timelines."""
+
     @classmethod
     def find_config(cls) -> Path | None:
         """Find the config file in the current directory or parent directories."""
fast_agent/constants.py CHANGED
@@ -8,3 +8,6 @@ MCP_UI = "mcp-ui"
 REASONING = "reasoning"
 FAST_AGENT_ERROR_CHANNEL = "fast-agent-error"
 FAST_AGENT_REMOVED_METADATA_CHANNEL = "fast-agent-removed-meta"
+# should we have MAX_TOOL_CALLS instead to constrain by number of tools rather than turns...?
+DEFAULT_MAX_ITERATIONS = 20
+"""Maximum number of User/Assistant turns to take"""