fast-agent-mcp 0.3.14__py3-none-any.whl → 0.3.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -8,7 +8,7 @@ This class extends LlmDecorator with LLM-specific interaction behaviors includin
 - Chat display integration
 """

-from typing import List, Optional, Tuple
+from typing import Callable, List, Optional, Tuple

 try:
     from a2a.types import AgentCapabilities  # type: ignore
@@ -218,6 +218,13 @@ class LlmAgent(LlmDecorator):
         chat_turn = self._llm.chat_turn()
         self.display.show_user_message(message.last_text() or "", model, chat_turn, name=self.name)

+    def _should_stream(self) -> bool:
+        """Determine whether streaming display should be used."""
+        if getattr(self, "display", None):
+            enabled, _ = self.display.resolve_streaming_preferences()
+            return enabled
+        return True
+
     async def generate_impl(
         self,
         messages: List[PromptMessageExtended],
@@ -232,11 +239,45 @@ class LlmAgent(LlmDecorator):
         self.show_user_message(message=messages[-1])

         # TODO - manage error catch, recovery, pause
-        result, summary = await self._generate_with_summary(messages, request_params, tools)
+        summary_text: Text | None = None

-        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+        if self._should_stream():
+            display_name = self.name
+            display_model = self.llm.model_name if self._llm else None
+
+            remove_listener: Callable[[], None] | None = None
+
+            with self.display.streaming_assistant_message(
+                name=display_name,
+                model=display_model,
+            ) as stream_handle:
+                try:
+                    remove_listener = self.llm.add_stream_listener(stream_handle.update)
+                except Exception:
+                    remove_listener = None
+
+                try:
+                    result, summary = await self._generate_with_summary(
+                        messages, request_params, tools
+                    )
+                finally:
+                    if remove_listener:
+                        remove_listener()
+
+                if summary:
+                    summary_text = Text(f"\n\n{summary.message}", style="dim red italic")
+
+                stream_handle.finalize(result)
+
+            await self.show_assistant_message(result, additional_message=summary_text)
+        else:
+            result, summary = await self._generate_with_summary(messages, request_params, tools)
+
+            summary_text = (
+                Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
+            )
+            await self.show_assistant_message(result, additional_message=summary_text)

-        await self.show_assistant_message(result, additional_message=summary_text)
         return result

     async def structured_impl(
@@ -608,7 +608,7 @@ class McpAgent(ABC, ToolAgent):
             return PromptMessageExtended(role="user", tool_results={})

         tool_results: dict[str, CallToolResult] = {}
-        self._tool_loop_error = None
+        tool_loop_error: str | None = None

         # Cache available tool names (original, not namespaced) for display
         available_tools = [
@@ -639,7 +639,7 @@ class McpAgent(ABC, ToolAgent):
             if not tool_available:
                 error_message = f"Tool '{display_tool_name}' is not available"
                 self.logger.error(error_message)
-                self._mark_tool_loop_error(
+                tool_loop_error = self._mark_tool_loop_error(
                     correlation_id=correlation_id,
                     error_message=error_message,
                     tool_results=tool_results,
@@ -694,7 +694,7 @@ class McpAgent(ABC, ToolAgent):
             # Show error result too (no need for skybridge config on errors)
             self.display.show_tool_result(name=self._name, result=error_result)

-        return self._finalize_tool_results(tool_results)
+        return self._finalize_tool_results(tool_results, tool_loop_error=tool_loop_error)

     async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
         """
@@ -39,7 +39,6 @@ class ToolAgent(LlmAgent):

         self._execution_tools: dict[str, FastMCPTool] = {}
         self._tool_schemas: list[Tool] = []
-        self._tool_loop_error: str | None = None

         # Build a working list of tools and auto-inject human-input tool if missing
         working_tools: list[FastMCPTool | Callable] = list(tools) if tools else []
@@ -96,18 +95,23 @@ class ToolAgent(LlmAgent):
             )

             if LlmStopReason.TOOL_USE == result.stop_reason:
-                self._tool_loop_error = None
+                tool_message = await self.run_tools(result)
+                error_channel_messages = (tool_message.channels or {}).get(FAST_AGENT_ERROR_CHANNEL)
+                if error_channel_messages:
+                    tool_result_contents = [
+                        content
+                        for tool_result in (tool_message.tool_results or {}).values()
+                        for content in tool_result.content
+                    ]
+                    if tool_result_contents:
+                        if result.content is None:
+                            result.content = []
+                        result.content.extend(tool_result_contents)
+                    result.stop_reason = LlmStopReason.ERROR
+                    break
                 if self.config.use_history:
-                    tool_message = await self.run_tools(result)
-                    if self._tool_loop_error:
-                        result.stop_reason = LlmStopReason.ERROR
-                        break
                     messages = [tool_message]
                 else:
-                    tool_message = await self.run_tools(result)
-                    if self._tool_loop_error:
-                        result.stop_reason = LlmStopReason.ERROR
-                        break
                     messages.extend([result, tool_message])
             else:
                 break
@@ -131,7 +135,7 @@ class ToolAgent(LlmAgent):
             return PromptMessageExtended(role="user", tool_results={})

         tool_results: dict[str, CallToolResult] = {}
-        self._tool_loop_error = None
+        tool_loop_error: str | None = None
         # TODO -- use gather() for parallel results, update display
         available_tools = [t.name for t in (await self.list_tools()).tools]
         for correlation_id, tool_request in request.tool_calls.items():
@@ -141,7 +145,7 @@ class ToolAgent(LlmAgent):
             if tool_name not in self._execution_tools:
                 error_message = f"Tool '{tool_name}' is not available"
                 logger.error(error_message)
-                self._mark_tool_loop_error(
+                tool_loop_error = self._mark_tool_loop_error(
                     correlation_id=correlation_id,
                     error_message=error_message,
                     tool_results=tool_results,
@@ -170,7 +174,7 @@ class ToolAgent(LlmAgent):
             tool_results[correlation_id] = result
             self.display.show_tool_result(name=self.name, result=result, tool_name=tool_name)

-        return self._finalize_tool_results(tool_results)
+        return self._finalize_tool_results(tool_results, tool_loop_error=tool_loop_error)

     def _mark_tool_loop_error(
         self,
@@ -178,24 +182,34 @@ class ToolAgent(LlmAgent):
         correlation_id: str,
         error_message: str,
         tool_results: dict[str, CallToolResult],
-    ) -> None:
+    ) -> str:
         error_result = CallToolResult(
             content=[text_content(error_message)],
             isError=True,
         )
         tool_results[correlation_id] = error_result
         self.display.show_tool_result(name=self.name, result=error_result)
-        self._tool_loop_error = error_message
+        return error_message

     def _finalize_tool_results(
-        self, tool_results: dict[str, CallToolResult]
+        self,
+        tool_results: dict[str, CallToolResult],
+        *,
+        tool_loop_error: str | None = None,
     ) -> PromptMessageExtended:
         channels = None
-        if self._tool_loop_error:
+        content = []
+        if tool_loop_error:
+            content.append(text_content(tool_loop_error))
             channels = {
-                FAST_AGENT_ERROR_CHANNEL: [text_content(self._tool_loop_error)],
+                FAST_AGENT_ERROR_CHANNEL: [text_content(tool_loop_error)],
             }
-        return PromptMessageExtended(role="user", tool_results=tool_results, channels=channels)
+        return PromptMessageExtended(
+            role="user",
+            content=content,
+            tool_results=tool_results,
+            channels=channels,
+        )

     async def list_tools(self) -> ListToolsResult:
         """Return available tools for this agent. Overridable by subclasses."""
@@ -213,7 +213,8 @@ class RouterAgent(LlmAgent):
         agent: LlmAgent = self.agent_map[route.agent]

         # Dispatch the request to the selected agent
-        return await agent.generate_impl(messages, request_params)
+        # discarded request_params: use llm defaults for subagents
+        return await agent.generate_impl(messages)

     async def structured_impl(
         self,
@@ -156,6 +156,7 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
         "logger": {
             "level": default_settings.logger.level,
             "type": default_settings.logger.type,
+            "streaming": default_settings.logger.streaming,
             "progress_display": default_settings.logger.progress_display,
             "show_chat": default_settings.logger.show_chat,
             "show_tools": default_settings.logger.show_tools,
@@ -198,6 +199,7 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
         result["logger"] = {
             "level": logger_config.get("level", default_settings.logger.level),
             "type": logger_config.get("type", default_settings.logger.type),
+            "streaming": logger_config.get("streaming", default_settings.logger.streaming),
             "progress_display": logger_config.get(
                 "progress_display", default_settings.logger.progress_display
             ),
@@ -221,8 +223,7 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
             timeline_override = MCPTimelineSettings(**(config.get("mcp_timeline") or {}))
         except Exception as exc:  # pragma: no cover - defensive
             console.print(
-                "[yellow]Warning:[/yellow] Invalid mcp_timeline configuration; "
-                "using defaults."
+                "[yellow]Warning:[/yellow] Invalid mcp_timeline configuration; using defaults."
             )
             console.print(f"[yellow]Details:[/yellow] {exc}")
         else:
@@ -432,6 +433,8 @@ def show_check_summary() -> None:
         ("Log Level", logger.get("level", "warning (default)")),
         ("Log Type", logger.get("type", "file (default)")),
         ("MCP-UI", mcp_ui_display),
+        ("Streaming Mode", f"[green]{logger.get('streaming', 'markdown')}[/green]"),
+        ("Streaming Display", bool_to_symbol(logger.get("streaming_display", True))),
         ("Progress Display", bool_to_symbol(logger.get("progress_display", True))),
         ("Show Chat", bool_to_symbol(logger.get("show_chat", True))),
         ("Show Tools", bool_to_symbol(logger.get("show_tools", True))),
fast_agent/config.py CHANGED
@@ -448,8 +448,8 @@ class LoggerSettings(BaseModel):
     """Truncate display of long tool calls"""
     enable_markup: bool = True
     """Enable markup in console output. Disable for outputs that may conflict with rich console formatting"""
-    use_legacy_display: bool = False
-    """Use the legacy console display instead of the new style display"""
+    streaming: Literal["markdown", "plain", "none"] = "markdown"
+    """Streaming renderer for assistant responses"""


 def find_fastagent_config_files(start_path: Path) -> Tuple[Optional[Path], Optional[Path]]:
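
The use_legacy_display flag is gone; console streaming is now controlled by the streaming literal. A minimal sketch of setting it in code, assuming LoggerSettings is imported from fast_agent/config.py as changed above:

    from fast_agent.config import LoggerSettings

    # Accepted values are "markdown" (default), "plain", or "none".
    logger_settings = LoggerSettings(streaming="plain")
    assert logger_settings.streaming == "plain"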
@@ -293,7 +293,9 @@ class FastAgent:
         default: bool = False,
         elicitation_handler: Optional[ElicitationFnT] = None,
         api_key: str | None = None,
-    ) -> Callable[[Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]]: ...
+    ) -> Callable[
+        [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]
+    ]: ...

     def custom(
         self,
@@ -474,6 +476,17 @@ class FastAgent:
            # Create a wrapper with all agents for simplified access
            wrapper = AgentApp(active_agents)

+           # Disable streaming if parallel agents are present
+           from fast_agent.agents.agent_types import AgentType
+
+           has_parallel = any(
+               agent.agent_type == AgentType.PARALLEL for agent in active_agents.values()
+           )
+           if has_parallel:
+               cfg = self.app.context.config
+               if cfg is not None and cfg.logger is not None:
+                   cfg.logger.streaming = "none"
+

            # Handle command line options that should be processed after agent initialization

@@ -200,6 +200,34 @@ def get_dependencies(
     return deps


+def get_agent_dependencies(agent_data: dict[str, Any]) -> set[str]:
+    deps: set[str] = set()
+    agent_dependency_attribute_names = {
+        AgentType.CHAIN: ("sequence",),
+        AgentType.EVALUATOR_OPTIMIZER: ("evaluator", "generator", "eval_optimizer_agents"),
+        AgentType.ITERATIVE_PLANNER: ("child_agents",),
+        AgentType.ORCHESTRATOR: ("child_agents",),
+        AgentType.PARALLEL: ("fan_out", "fan_in", "parallel_agents"),
+        AgentType.ROUTER: ("router_agents",),
+    }
+    agent_type = agent_data["type"]
+    dependency_names = agent_dependency_attribute_names.get(agent_type, None)
+    if dependency_names is None:
+        return deps
+
+    for dependency_name in dependency_names:
+        dependency_value = agent_data.get(dependency_name)
+        if dependency_value is None:
+            continue
+        if isinstance(dependency_value, str):
+            deps.add(dependency_value)
+        else:
+            # here, we have an implicit assumption that if it is not a None or a string, then it is a list
+            deps.update(dependency_value)
+
+    return deps
+
+
 def get_dependencies_groups(
     agents_dict: Dict[str, Dict[str, Any]], allow_cycles: bool = False
 ) -> List[List[str]]:
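
The new helper replaces the per-type branching that the next hunk removes from get_dependencies_groups. A hedged usage sketch, assuming agent_data stores the AgentType member used as the lookup key above (the agent names are made up):

    parallel_agent = {
        "type": AgentType.PARALLEL,
        "fan_out": ["researcher", "writer"],
        "fan_in": "aggregator",
    }
    deps = get_agent_dependencies(parallel_agent)
    # {"researcher", "writer", "aggregator"} - string values are added, list values are merged,
    # and attributes that are absent (here "parallel_agents") are simply skipped.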
@@ -221,39 +249,9 @@ def get_dependencies_groups(
     agent_names = list(agents_dict.keys())

     # Dictionary to store dependencies for each agent
-    dependencies = {name: set() for name in agent_names}
-
-    # Build the dependency graph
-    for name, agent_data in agents_dict.items():
-        agent_type = agent_data["type"]  # This is a string from config
-
-        # Note: Compare string values from config with the Enum's string value
-        if agent_type == AgentType.PARALLEL.value:
-            # Parallel agents depend on their fan-out and fan-in agents
-            dependencies[name].update(agent_data.get("parallel_agents", []))
-            # Also add explicit fan_out dependencies if present
-            if "fan_out" in agent_data:
-                dependencies[name].update(agent_data["fan_out"])
-            # Add explicit fan_in dependency if present
-            if "fan_in" in agent_data and agent_data["fan_in"]:
-                dependencies[name].add(agent_data["fan_in"])
-        elif agent_type == AgentType.CHAIN.value:
-            # Chain agents depend on the agents in their sequence
-            dependencies[name].update(agent_data.get("sequence", []))
-        elif agent_type == AgentType.ROUTER.value:
-            # Router agents depend on the agents they route to
-            dependencies[name].update(agent_data.get("router_agents", []))
-        elif agent_type == AgentType.ORCHESTRATOR.value:
-            # Orchestrator agents depend on their child agents
-            dependencies[name].update(agent_data.get("child_agents", []))
-        elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
-            # Evaluator-Optimizer agents depend on their evaluator and generator agents
-            if "evaluator" in agent_data:
-                dependencies[name].add(agent_data["evaluator"])
-            if "generator" in agent_data:
-                dependencies[name].add(agent_data["generator"])
-            # For backward compatibility - also check eval_optimizer_agents if present
-            dependencies[name].update(agent_data.get("eval_optimizer_agents", []))
+    dependencies = {
+        name: get_agent_dependencies(agent_data) for name, agent_data in agents_dict.items()
+    }

     # Check for cycles if not allowed
     if not allow_cycles:
@@ -29,6 +29,8 @@ class StringField:
             schema["minLength"] = self.min_length
         if self.max_length is not None:
            schema["maxLength"] = self.max_length
+        if self.pattern is not None:
+            schema["pattern"] = self.pattern
         if self.format:
             schema["format"] = self.format

@@ -178,10 +180,11 @@ def string(
     default: Optional[str] = None,
     min_length: Optional[int] = None,
     max_length: Optional[int] = None,
+    pattern: Optional[str] = None,
     format: Optional[str] = None,
 ) -> StringField:
     """Create a string field."""
-    return StringField(title, description, default, min_length, max_length, format)
+    return StringField(title, description, default, min_length, max_length, pattern, format)


 def email(
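
With the pattern argument threaded through to StringField, elicitation string fields can now carry a regex constraint. A small sketch (the leading title argument is assumed from the StringField constructor order; the date pattern is only an example):

    date_field = string(
        "Date",
        pattern=r"^\d{4}-\d{2}-\d{2}$",
    )
    # Per the StringField hunk above, the generated JSON schema now includes a
    # "pattern" key alongside minLength/maxLength/format when the constraint is set.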
fast_agent/interfaces.py CHANGED
@@ -8,6 +8,7 @@ without pulling in MCP-specific code, helping to avoid circular imports.
 from typing import (
     TYPE_CHECKING,
     Any,
+    Callable,
     Dict,
     List,
     Mapping,
@@ -83,7 +84,9 @@ class FastAgentLLMProtocol(Protocol):
         self,
         request_params: RequestParams | None = None,
     ) -> RequestParams: ...
-
+
+    def add_stream_listener(self, listener: Callable[[str], None]) -> Callable[[], None]: ...
+
     @property
     def message_history(self) -> List[PromptMessageExtended]: ...

@@ -3,6 +3,7 @@ from contextvars import ContextVar
 from typing import (
     TYPE_CHECKING,
     Any,
+    Callable,
     Dict,
     Generic,
     List,
@@ -157,6 +158,7 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT

         # Initialize usage tracking
         self._usage_accumulator = UsageAccumulator()
+        self._stream_listeners: set[Callable[[str], None]] = set()

     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize default parameters for the LLM.
@@ -483,6 +485,8 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
         Returns:
             Updated estimated token count
         """
+        self._notify_stream_listeners(content)
+
         # Rough estimate: 1 token per 4 characters (OpenAI's typical ratio)
         text_length = len(content)
         additional_tokens = max(1, text_length // 4)
@@ -503,6 +507,33 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT

         return new_total

+    def add_stream_listener(self, listener: Callable[[str], None]) -> Callable[[], None]:
+        """
+        Register a callback invoked with streaming text chunks.
+
+        Args:
+            listener: Callable receiving the text chunk emitted by the provider.
+
+        Returns:
+            A function that removes the listener when called.
+        """
+        self._stream_listeners.add(listener)
+
+        def remove() -> None:
+            self._stream_listeners.discard(listener)
+
+        return remove
+
+    def _notify_stream_listeners(self, chunk: str) -> None:
+        """Notify registered listeners with a streaming text chunk."""
+        if not chunk:
+            return
+        for listener in list(self._stream_listeners):
+            try:
+                listener(chunk)
+            except Exception:
+                self.logger.exception("Stream listener raised an exception")
+
     def _log_chat_finished(self, model: Optional[str] = None) -> None:
         """Log a chat finished event"""
         data = {
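
A minimal sketch of consuming the new hook, mirroring what LlmAgent.generate_impl does above (llm stands for any FastAgentLLM instance; the print callback is only illustrative):

    remove_listener = llm.add_stream_listener(lambda chunk: print(chunk, end="", flush=True))
    try:
        ...  # run the turn; _notify_stream_listeners() fires for each streamed text chunk
    finally:
        remove_listener()  # detach the listener once the turn completes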
@@ -130,11 +130,9 @@ class ModelDatabase:
         context_window=400000, max_output_tokens=128000, tokenizes=OPENAI_MULTIMODAL
     )

-    # TODO update to 32000
     ANTHROPIC_OPUS_4_VERSIONED = ModelParameters(
         context_window=200000, max_output_tokens=32000, tokenizes=ANTHROPIC_MULTIMODAL
     )
-    # TODO update to 64000
     ANTHROPIC_SONNET_4_VERSIONED = ModelParameters(
         context_window=200000, max_output_tokens=64000, tokenizes=ANTHROPIC_MULTIMODAL
     )
@@ -237,6 +235,8 @@ class ModelDatabase:
         "claude-opus-4-0": ANTHROPIC_OPUS_4_VERSIONED,
         "claude-opus-4-1": ANTHROPIC_OPUS_4_VERSIONED,
         "claude-opus-4-20250514": ANTHROPIC_OPUS_4_VERSIONED,
+        "claude-haiku-4-5-20251001": ANTHROPIC_SONNET_4_VERSIONED,
+        "claude-haiku-4-5": ANTHROPIC_SONNET_4_VERSIONED,
         # DeepSeek Models
         "deepseek-chat": DEEPSEEK_CHAT_STANDARD,
         # Google Gemini Models (vanilla aliases and versioned)
@@ -86,6 +86,7 @@ class ModelFactory:
         "claude-sonnet-4-0": Provider.ANTHROPIC,
         "claude-sonnet-4-5-20250929": Provider.ANTHROPIC,
         "claude-sonnet-4-5": Provider.ANTHROPIC,
+        "claude-haiku-4-5": Provider.ANTHROPIC,
         "deepseek-chat": Provider.DEEPSEEK,
         "gemini-2.0-flash": Provider.GOOGLE,
         "gemini-2.5-flash-preview-05-20": Provider.GOOGLE,
@@ -109,9 +110,10 @@ class ModelFactory:
         "sonnet35": "claude-3-5-sonnet-latest",
         "sonnet37": "claude-3-7-sonnet-latest",
         "claude": "claude-sonnet-4-0",
-        "haiku": "claude-3-5-haiku-latest",
+        "haiku": "claude-haiku-4-5",
         "haiku3": "claude-3-haiku-20240307",
         "haiku35": "claude-3-5-haiku-latest",
+        "hauku45": "claude-haiku-4-5",
         "opus": "claude-opus-4-1",
         "opus4": "claude-opus-4-1",
         "opus3": "claude-3-opus-latest",
@@ -319,6 +321,7 @@ class ModelFactory:
                 return GroqLLM
             if provider == Provider.RESPONSES:
                 from fast_agent.llm.provider.openai.responses import ResponsesLLM
+
                 return ResponsesLLM

         except Exception as e:
@@ -1,4 +1,4 @@
-from typing import Dict, List, Optional
+from typing import Dict, List, Mapping, Optional, Sequence

 from mcp.types import (
     CallToolRequest,
@@ -27,7 +27,7 @@ class PromptMessageExtended(BaseModel):
     content: List[ContentBlock] = []
     tool_calls: Dict[str, CallToolRequest] | None = None
     tool_results: Dict[str, CallToolResult] | None = None
-    channels: Dict[str, List[ContentBlock]] | None = None
+    channels: Mapping[str, Sequence[ContentBlock]] | None = None
     stop_reason: LlmStopReason | None = None

     @classmethod
@@ -16,9 +16,9 @@ default_model: gpt-5-mini.low
 # mcp_ui_mode: enabled

 # MCP timeline display (adjust activity window/intervals in MCP UI + fast-agent check)
-#mcp_timeline:
-#  steps: 20 # number of timeline buckets to render
-#  step_seconds: 30 # seconds per bucket (accepts values like "45s", "2m")
+mcp_timeline:
+  steps: 20 # number of timeline buckets to render
+  step_seconds: 15 # seconds per bucket (accepts values like "45s", "2m")

 # Logging and Console Configuration:
 logger:
@@ -28,13 +28,14 @@ logger:

   # Switch the progress display on or off
   progress_display: true
-
   # Show chat User/Assistant messages on the console
   show_chat: true
   # Show tool calls on the console
   show_tools: true
   # Truncate long tool responses on the console
   truncate_tools: true
+  # Streaming renderer for assistant responses: "markdown", "plain", or "none"
+  streaming: markdown

 # MCP Servers
 mcp: