fast-agent-mcp 0.3.10__py3-none-any.whl → 0.3.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of fast-agent-mcp might be problematic. See the package registry's advisory page for more details.

@@ -29,64 +29,81 @@ async def elicitation_input_callback(
29
29
  )
30
30
  effective_server_name = server_name or "Unknown Server"
31
31
 
32
- # Check if elicitation is disabled for this server
33
- if elicitation_state.is_disabled(effective_server_name):
32
+ # Start tracking elicitation operation
33
+ try:
34
+ from fast_agent.ui import notification_tracker
35
+ notification_tracker.start_elicitation(effective_server_name)
36
+ except Exception:
37
+ # Don't let notification tracking break elicitation
38
+ pass
39
+
40
+ try:
41
+ # Check if elicitation is disabled for this server
42
+ if elicitation_state.is_disabled(effective_server_name):
43
+ return HumanInputResponse(
44
+ request_id=request.request_id,
45
+ response="__CANCELLED__",
46
+ metadata={"auto_cancelled": True, "reason": "Server elicitation disabled by user"},
47
+ )
48
+
49
+ # Get the elicitation schema from metadata
50
+ schema: Optional[Dict[str, Any]] = None
51
+ if request.metadata and "requested_schema" in request.metadata:
52
+ schema = request.metadata["requested_schema"]
53
+
54
+ # Use the context manager to pause the progress display while getting input
55
+ with progress_display.paused():
56
+ try:
57
+ if schema:
58
+ form_action, form_data = await show_simple_elicitation_form(
59
+ schema=schema,
60
+ message=request.prompt,
61
+ agent_name=effective_agent_name,
62
+ server_name=effective_server_name,
63
+ )
64
+
65
+ if form_action == "accept" and form_data is not None:
66
+ # Convert form data to JSON string
67
+ import json
68
+
69
+ response = json.dumps(form_data)
70
+ elif form_action == "decline":
71
+ response = "__DECLINED__"
72
+ elif form_action == "disable":
73
+ response = "__DISABLE_SERVER__"
74
+ else: # cancel
75
+ response = "__CANCELLED__"
76
+ else:
77
+ # No schema, fall back to text input using prompt_toolkit only
78
+ from prompt_toolkit.shortcuts import input_dialog
79
+
80
+ response = await input_dialog(
81
+ title="Input Requested",
82
+ text=f"Agent: {effective_agent_name}\nServer: {effective_server_name}\n\n{request.prompt}",
83
+ style=ELICITATION_STYLE,
84
+ ).run_async()
85
+
86
+ if response is None:
87
+ response = "__CANCELLED__"
88
+
89
+ except KeyboardInterrupt:
90
+ response = "__CANCELLED__"
91
+ except EOFError:
92
+ response = "__CANCELLED__"
93
+
34
94
  return HumanInputResponse(
35
95
  request_id=request.request_id,
36
- response="__CANCELLED__",
37
- metadata={"auto_cancelled": True, "reason": "Server elicitation disabled by user"},
96
+ response=response.strip() if isinstance(response, str) else response,
97
+ metadata={"has_schema": schema is not None},
38
98
  )
39
-
40
- # Get the elicitation schema from metadata
41
- schema: Optional[Dict[str, Any]] = None
42
- if request.metadata and "requested_schema" in request.metadata:
43
- schema = request.metadata["requested_schema"]
44
-
45
- # Use the context manager to pause the progress display while getting input
46
- with progress_display.paused():
99
+ finally:
100
+ # End tracking elicitation operation
47
101
  try:
48
- if schema:
49
- form_action, form_data = await show_simple_elicitation_form(
50
- schema=schema,
51
- message=request.prompt,
52
- agent_name=effective_agent_name,
53
- server_name=effective_server_name,
54
- )
55
-
56
- if form_action == "accept" and form_data is not None:
57
- # Convert form data to JSON string
58
- import json
59
-
60
- response = json.dumps(form_data)
61
- elif form_action == "decline":
62
- response = "__DECLINED__"
63
- elif form_action == "disable":
64
- response = "__DISABLE_SERVER__"
65
- else: # cancel
66
- response = "__CANCELLED__"
67
- else:
68
- # No schema, fall back to text input using prompt_toolkit only
69
- from prompt_toolkit.shortcuts import input_dialog
70
-
71
- response = await input_dialog(
72
- title="Input Requested",
73
- text=f"Agent: {effective_agent_name}\nServer: {effective_server_name}\n\n{request.prompt}",
74
- style=ELICITATION_STYLE,
75
- ).run_async()
76
-
77
- if response is None:
78
- response = "__CANCELLED__"
79
-
80
- except KeyboardInterrupt:
81
- response = "__CANCELLED__"
82
- except EOFError:
83
- response = "__CANCELLED__"
84
-
85
- return HumanInputResponse(
86
- request_id=request.request_id,
87
- response=response.strip() if isinstance(response, str) else response,
88
- metadata={"has_schema": schema is not None},
89
- )
102
+ from fast_agent.ui import notification_tracker
103
+ notification_tracker.end_elicitation(effective_server_name)
104
+ except Exception:
105
+ # Don't let notification tracking break elicitation
106
+ pass
90
107
 
91
108
 
92
109
  # Register adapter with fast_agent tools so they can invoke this UI handler without importing types
fast_agent/interfaces.py CHANGED
@@ -94,6 +94,8 @@ class FastAgentLLMProtocol(Protocol):
94
94
  @property
95
95
  def model_info(self) -> "ModelInfo | None": ...
96
96
 
97
+ def clear(self, *, clear_prompts: bool = False) -> None: ...
98
+
97
99
 
98
100
  class LlmAgentProtocol(Protocol):
99
101
  """Protocol defining the minimal interface for LLM agents."""
@@ -111,6 +113,8 @@ class LlmAgentProtocol(Protocol):
111
113
 
112
114
  async def shutdown(self) -> None: ...
113
115
 
116
+ def clear(self, *, clear_prompts: bool = False) -> None: ...
117
+
114
118
 
115
119
  class AgentProtocol(LlmAgentProtocol):
116
120
  """Standard agent interface with flexible input types."""
@@ -129,6 +129,7 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
129
129
  self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()
130
130
 
131
131
  self._message_history: List[PromptMessageExtended] = []
132
+ self._template_messages: List[PromptMessageExtended] = []
132
133
 
133
134
  # Initialize the display component
134
135
  from fast_agent.ui.console_display import ConsoleDisplay
@@ -575,11 +576,15 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
575
576
 
576
577
  # Convert to PromptMessageExtended objects
577
578
  multipart_messages = PromptMessageExtended.parse_get_prompt_result(prompt_result)
579
+ # Store a local copy of template messages so we can retain them across clears
580
+ self._template_messages = [msg.model_copy(deep=True) for msg in multipart_messages]
578
581
 
579
582
  # Delegate to the provider-specific implementation
580
583
  result = await self._apply_prompt_provider_specific(
581
584
  multipart_messages, None, is_template=True
582
585
  )
586
+ # Ensure message history always includes the stored template when applied
587
+ self._message_history = [msg.model_copy(deep=True) for msg in self._template_messages]
583
588
  return result.first_text()
584
589
 
585
590
  async def _save_history(self, filename: str) -> None:
@@ -607,6 +612,18 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
607
612
  """
608
613
  return self._message_history
609
614
 
615
+ def clear(self, *, clear_prompts: bool = False) -> None:
616
+ """Reset stored message history while optionally retaining prompt templates."""
617
+
618
+ self.history.clear(clear_prompts=clear_prompts)
619
+ if clear_prompts:
620
+ self._template_messages = []
621
+ self._message_history = []
622
+ return
623
+
624
+ # Restore message history to template messages only; new turns will append as normal
625
+ self._message_history = [msg.model_copy(deep=True) for msg in self._template_messages]
626
+
610
627
  def _api_key(self):
611
628
  if self._init_api_key:
612
629
  return self._init_api_key
@@ -232,6 +232,8 @@ class ModelDatabase:
232
232
  "claude-3-7-sonnet-latest": ANTHROPIC_37_SERIES,
233
233
  "claude-sonnet-4-0": ANTHROPIC_SONNET_4_VERSIONED,
234
234
  "claude-sonnet-4-20250514": ANTHROPIC_SONNET_4_VERSIONED,
235
+ "claude-sonnet-4-5": ANTHROPIC_SONNET_4_VERSIONED,
236
+ "claude-sonnet-4-5-20250929": ANTHROPIC_SONNET_4_VERSIONED,
235
237
  "claude-opus-4-0": ANTHROPIC_OPUS_4_VERSIONED,
236
238
  "claude-opus-4-1": ANTHROPIC_OPUS_4_VERSIONED,
237
239
  "claude-opus-4-20250514": ANTHROPIC_OPUS_4_VERSIONED,
@@ -84,6 +84,8 @@ class ModelFactory:
84
84
  "claude-opus-4-20250514": Provider.ANTHROPIC,
85
85
  "claude-sonnet-4-20250514": Provider.ANTHROPIC,
86
86
  "claude-sonnet-4-0": Provider.ANTHROPIC,
87
+ "claude-sonnet-4-5-20250929": Provider.ANTHROPIC,
88
+ "claude-sonnet-4-5": Provider.ANTHROPIC,
87
89
  "deepseek-chat": Provider.DEEPSEEK,
88
90
  "gemini-2.0-flash": Provider.GOOGLE,
89
91
  "gemini-2.5-flash-preview-05-20": Provider.GOOGLE,
@@ -101,8 +103,9 @@ class ModelFactory:
101
103
  }
102
104
 
103
105
  MODEL_ALIASES = {
104
- "sonnet": "claude-sonnet-4-0",
106
+ "sonnet": "claude-sonnet-4-5",
105
107
  "sonnet4": "claude-sonnet-4-0",
108
+ "sonnet45": "claude-sonnet-4-5",
106
109
  "sonnet35": "claude-3-5-sonnet-latest",
107
110
  "sonnet37": "claude-3-7-sonnet-latest",
108
111
  "claude": "claude-sonnet-4-0",
@@ -53,7 +53,7 @@ def resolve_elicitation_handler(
53
53
  if mode == "none":
54
54
  logger.debug(f"Elicitation disabled by server config for agent {agent_config.name}")
55
55
  return None # Don't advertise elicitation capability
56
- elif mode == "auto_cancel":
56
+ elif mode == "auto-cancel":
57
57
  logger.debug(
58
58
  f"Using auto-cancel elicitation handler (server config) for agent {agent_config.name}"
59
59
  )
@@ -74,7 +74,7 @@ def resolve_elicitation_handler(
74
74
  if mode == "none":
75
75
  logger.debug(f"Elicitation disabled by global config for agent {agent_config.name}")
76
76
  return None # Don't advertise elicitation capability
77
- elif mode == "auto_cancel":
77
+ elif mode == "auto-cancel":
78
78
  logger.debug(
79
79
  f"Using auto-cancel elicitation handler (global config) for agent {agent_config.name}"
80
80
  )
@@ -1362,22 +1362,15 @@ class MCPAggregator(ContextDependent):
1362
1362
 
1363
1363
  async with self._refresh_lock:
1364
1364
  try:
1365
- # Fetch new tools from the server
1366
- if self.connection_persistence:
1367
- server_connection = await self._persistent_connection_manager.get_server(
1368
- server_name,
1369
- client_session_factory=self._create_session_factory(server_name),
1370
- )
1371
- tools_result = await server_connection.session.list_tools()
1372
- new_tools = tools_result.tools or []
1373
- else:
1374
- async with gen_client(
1375
- server_name,
1376
- server_registry=self.context.server_registry,
1377
- client_session_factory=self._create_session_factory(server_name),
1378
- ) as client:
1379
- tools_result = await client.list_tools()
1380
- new_tools = tools_result.tools or []
1365
+ # Fetch new tools from the server using _execute_on_server to properly record stats
1366
+ tools_result = await self._execute_on_server(
1367
+ server_name=server_name,
1368
+ operation_type="tools/list",
1369
+ operation_name="",
1370
+ method_name="list_tools",
1371
+ method_args={},
1372
+ )
1373
+ new_tools = tools_result.tools or []
1381
1374
 
1382
1375
  # Update tool maps
1383
1376
  async with self._tool_map_lock:
@@ -117,6 +117,7 @@ def load_prompt(file: Path) -> List[PromptMessageExtended]:
117
117
  if path_str.endswith(".json"):
118
118
  # JSON files use the serialization module directly
119
119
  from fast_agent.mcp.prompt_serialization import load_messages
120
+
120
121
  return load_messages(str(file))
121
122
  else:
122
123
  # Non-JSON files need template processing for resource loading
@@ -128,15 +129,13 @@ def load_prompt(file: Path) -> List[PromptMessageExtended]:
128
129
  # Render the template without arguments to get the messages
129
130
  messages = create_messages_with_resources(
130
131
  template.content_sections,
131
- [file] # Pass the file path for resource resolution
132
+ [file], # Pass the file path for resource resolution
132
133
  )
133
134
 
134
135
  # Convert to PromptMessageExtended
135
136
  return PromptMessageExtended.to_extended(messages)
136
137
 
137
138
 
138
-
139
-
140
139
  def load_prompt_as_get_prompt_result(file: Path):
141
140
  """
142
141
  Load a prompt from a file and convert to GetPromptResult format for MCP compatibility.
@@ -77,6 +77,19 @@ async def sample(mcp_ctx: ClientSession, params: CreateMessageRequestParams) ->
77
77
  Returns:
78
78
  A CreateMessageResult containing the LLM's response
79
79
  """
80
+ # Get server name for notification tracking
81
+ server_name = "unknown"
82
+ if hasattr(mcp_ctx, "session") and hasattr(mcp_ctx.session, "session_server_name"):
83
+ server_name = mcp_ctx.session.session_server_name or "unknown"
84
+
85
+ # Start tracking sampling operation
86
+ try:
87
+ from fast_agent.ui import notification_tracker
88
+ notification_tracker.start_sampling(server_name)
89
+ except Exception:
90
+ # Don't let notification tracking break sampling
91
+ pass
92
+
80
93
  model: str | None = None
81
94
  api_key: str | None = None
82
95
  try:
@@ -157,6 +170,14 @@ async def sample(mcp_ctx: ClientSession, params: CreateMessageRequestParams) ->
157
170
  return SamplingConverter.error_result(
158
171
  error_message=f"Error in sampling: {str(e)}", model=model
159
172
  )
173
+ finally:
174
+ # End tracking sampling operation
175
+ try:
176
+ from fast_agent.ui import notification_tracker
177
+ notification_tracker.end_sampling(server_name)
178
+ except Exception:
179
+ # Don't let notification tracking break sampling
180
+ pass
160
181
 
161
182
 
162
183
  def sampling_agent_config(
@@ -623,33 +623,38 @@ class ConsoleDisplay:
623
623
  if not self.config or not self.config.logger.show_tools:
624
624
  return
625
625
 
626
- # Combined separator and status line
627
- if agent_name:
628
- left = (
629
- f"[magenta]▎[/magenta][dim magenta]▶[/dim magenta] [magenta]{agent_name}[/magenta]"
630
- )
631
- else:
632
- left = "[magenta]▎[/magenta][dim magenta]▶[/dim magenta]"
626
+ # Check if prompt_toolkit is active
627
+ try:
628
+ from prompt_toolkit.application.current import get_app
633
629
 
634
- right = f"[dim]{updated_server}[/dim]"
635
- self._create_combined_separator_status(left, right)
630
+ app = get_app()
631
+ # We're in interactive mode - add to notification tracker
632
+ from fast_agent.ui import notification_tracker
636
633
 
637
- # Display update message
638
- message = f"Updating tools for server {updated_server}"
639
- console.console.print(message, style="dim", markup=self._markup)
634
+ notification_tracker.add_tool_update(updated_server)
635
+ app.invalidate() # Force toolbar redraw
640
636
 
641
- # Bottom separator
642
- console.console.print()
643
- console.console.print("─" * console.console.size.width, style="dim")
644
- console.console.print()
637
+ except: # noqa: E722
638
+ # No active prompt_toolkit session - display with rich as before
639
+ # Combined separator and status line
640
+ if agent_name:
641
+ left = (
642
+ f"[magenta]▎[/magenta][dim magenta]▶[/dim magenta] [magenta]{agent_name}[/magenta]"
643
+ )
644
+ else:
645
+ left = "[magenta]▎[/magenta][dim magenta]▶[/dim magenta]"
645
646
 
646
- # Force prompt_toolkit redraw if active
647
- try:
648
- from prompt_toolkit.application.current import get_app
647
+ right = f"[dim]{updated_server}[/dim]"
648
+ self._create_combined_separator_status(left, right)
649
649
 
650
- get_app().invalidate() # Forces prompt_toolkit to redraw
651
- except: # noqa: E722
652
- pass # No active prompt_toolkit session
650
+ # Display update message
651
+ message = f"Updating tools for server {updated_server}"
652
+ console.console.print(message, style="dim", markup=self._markup)
653
+
654
+ # Bottom separator
655
+ console.console.print()
656
+ console.console.print("─" * console.console.size.width, style="dim")
657
+ console.console.print()
653
658
 
654
659
  def _create_combined_separator_status(self, left_content: str, right_info: str = "") -> None:
655
660
  """
@@ -339,15 +339,16 @@ class AgentCompleter(Completer):
339
339
  # Map commands to their descriptions for better completion hints
340
340
  self.commands = {
341
341
  "mcp": "Show MCP server status",
342
+ "history": "Show conversation history overview (optionally another agent)",
342
343
  "tools": "List available MCP tools",
343
344
  "prompt": "List and choose MCP prompts, or apply specific prompt (/prompt <name>)",
345
+ "clear": "Clear history",
344
346
  "agents": "List available agents",
345
347
  "system": "Show the current system prompt",
346
348
  "usage": "Show current usage statistics",
347
349
  "markdown": "Show last assistant message without markdown formatting",
348
350
  "save_history": "Save history; .json = MCP JSON, others = Markdown",
349
351
  "help": "Show commands and shortcuts",
350
- "clear": "Clear the screen",
351
352
  "EXIT": "Exit fast-agent, terminating any running workflows",
352
353
  "STOP": "Stop this prompting session and move to next workflow step",
353
354
  **(commands or {}), # Allow custom commands to be passed in
@@ -518,7 +519,15 @@ def create_keybindings(
518
519
 
519
520
  @kb.add("c-l")
520
521
  def _(event) -> None:
521
- """Ctrl+L: Clear the input buffer."""
522
+ """Ctrl+L: Clear and redraw the terminal screen."""
523
+ app_ref = event.app or app
524
+ if app_ref and getattr(app_ref, "renderer", None):
525
+ app_ref.renderer.clear()
526
+ app_ref.invalidate()
527
+
528
+ @kb.add("c-u")
529
+ def _(event) -> None:
530
+ """Ctrl+U: Clear the input buffer."""
522
531
  event.current_buffer.text = ""
523
532
 
524
533
  @kb.add("c-e")
@@ -717,17 +726,44 @@ async def get_enhanced_input(
717
726
  # Version/app label in green (dynamic version)
718
727
  version_segment = f"fast-agent {app_version}"
719
728
 
729
+ # Add notifications - prioritize active events over completed ones
730
+ from fast_agent.ui import notification_tracker
731
+
732
+ notification_segment = ""
733
+
734
+ # Check for active events first (highest priority)
735
+ active_status = notification_tracker.get_active_status()
736
+ if active_status:
737
+ event_type = active_status["type"].upper()
738
+ server = active_status["server"]
739
+ notification_segment = (
740
+ f" | <style fg='ansired' bg='ansiblack'>◀ {event_type} ({server})</style>"
741
+ )
742
+ elif notification_tracker.get_count() > 0:
743
+ # Show completed events summary when no active events
744
+ counts_by_type = notification_tracker.get_counts_by_type()
745
+ total_events = sum(counts_by_type.values()) if counts_by_type else 0
746
+
747
+ if len(counts_by_type) == 1:
748
+ event_type, count = next(iter(counts_by_type.items()))
749
+ label_text = notification_tracker.format_event_label(event_type, count)
750
+ notification_segment = f" | ◀ {label_text}"
751
+ else:
752
+ summary = notification_tracker.get_summary(compact=True)
753
+ heading = "event" if total_events == 1 else "events"
754
+ notification_segment = f" | ◀ {total_events} {heading} ({summary})"
755
+
720
756
  if middle:
721
757
  return HTML(
722
758
  f" <style fg='{toolbar_color}' bg='ansiblack'> {agent_name} </style> "
723
759
  f" {middle} | <style fg='{mode_style}' bg='ansiblack'> {mode_text} </style> | "
724
- f"{version_segment}"
760
+ f"{version_segment}{notification_segment}"
725
761
  )
726
762
  else:
727
763
  return HTML(
728
764
  f" <style fg='{toolbar_color}' bg='ansiblack'> {agent_name} </style> "
729
765
  f"Mode: <style fg='{mode_style}' bg='ansiblack'> {mode_text} </style> | "
730
- f"{version_segment}"
766
+ f"{version_segment}{notification_segment}"
731
767
  )
732
768
 
733
769
  # A more terminal-agnostic style that should work across themes
@@ -766,7 +802,7 @@ async def get_enhanced_input(
766
802
  session.app.key_bindings = bindings
767
803
 
768
804
  # Create formatted prompt text
769
- prompt_text = f"<ansibrightblue>{agent_name}</ansibrightblue> > "
805
+ prompt_text = f"<ansibrightblue>{agent_name}</ansibrightblue> "
770
806
 
771
807
  # Add default value display if requested
772
808
  if show_default and default and default != "STOP":
@@ -808,14 +844,26 @@ async def get_enhanced_input(
808
844
 
809
845
  if cmd == "help":
810
846
  return "HELP"
811
- elif cmd == "clear":
812
- return "CLEAR"
813
847
  elif cmd == "agents":
814
848
  return "LIST_AGENTS"
815
849
  elif cmd == "system":
816
850
  return "SHOW_SYSTEM"
817
851
  elif cmd == "usage":
818
852
  return "SHOW_USAGE"
853
+ elif cmd == "history":
854
+ target_agent = None
855
+ if len(cmd_parts) > 1:
856
+ candidate = cmd_parts[1].strip()
857
+ if candidate:
858
+ target_agent = candidate
859
+ return {"show_history": {"agent": target_agent}}
860
+ elif cmd == "clear":
861
+ target_agent = None
862
+ if len(cmd_parts) > 1:
863
+ candidate = cmd_parts[1].strip()
864
+ if candidate:
865
+ target_agent = candidate
866
+ return {"clear_history": {"agent": target_agent}}
819
867
  elif cmd == "markdown":
820
868
  return "MARKDOWN"
821
869
  elif cmd in ("save_history", "save"):
@@ -994,15 +1042,18 @@ async def handle_special_commands(command, agent_app=None):
994
1042
  if isinstance(command, dict):
995
1043
  return command
996
1044
 
1045
+ global agent_histories
1046
+
997
1047
  # Check for special string commands
998
1048
  if command == "HELP":
999
1049
  rich_print("\n[bold]Available Commands:[/bold]")
1000
1050
  rich_print(" /help - Show this help")
1001
- rich_print(" /clear - Clear screen")
1002
1051
  rich_print(" /agents - List available agents")
1003
1052
  rich_print(" /system - Show the current system prompt")
1004
1053
  rich_print(" /prompt <name> - Apply a specific prompt by name")
1005
1054
  rich_print(" /usage - Show current usage statistics")
1055
+ rich_print(" /history [agent_name] - Show chat history overview")
1056
+ rich_print(" /clear [agent_name] - Clear conversation history (keeps templates)")
1006
1057
  rich_print(" /markdown - Show last assistant message without markdown formatting")
1007
1058
  rich_print(" /mcpstatus - Show MCP server status summary for the active agent")
1008
1059
  rich_print(" /save_history <filename> - Save current chat history to a file")
@@ -1018,15 +1069,11 @@ async def handle_special_commands(command, agent_app=None):
1018
1069
  rich_print(" Ctrl+T - Toggle multiline mode")
1019
1070
  rich_print(" Ctrl+E - Edit in external editor")
1020
1071
  rich_print(" Ctrl+Y - Copy last assistant response to clipboard")
1021
- rich_print(" Ctrl+L - Clear input")
1072
+ rich_print(" Ctrl+L - Redraw the screen")
1073
+ rich_print(" Ctrl+U - Clear input")
1022
1074
  rich_print(" Up/Down - Navigate history")
1023
1075
  return True
1024
1076
 
1025
- elif command == "CLEAR":
1026
- # Clear screen (ANSI escape sequence)
1027
- print("\033c", end="")
1028
- return True
1029
-
1030
1077
  elif isinstance(command, str) and command.upper() == "EXIT":
1031
1078
  raise PromptExitError("User requested to exit fast-agent session")
1032
1079