fast-agent-mcp 0.3.6__py3-none-any.whl → 0.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of fast-agent-mcp might be problematic. Click here for more details.

Files changed (40)
  1. fast_agent/__init__.py +9 -1
  2. fast_agent/agents/agent_types.py +11 -11
  3. fast_agent/agents/llm_agent.py +60 -40
  4. fast_agent/agents/llm_decorator.py +351 -7
  5. fast_agent/agents/mcp_agent.py +87 -65
  6. fast_agent/agents/tool_agent.py +50 -4
  7. fast_agent/cli/commands/auth.py +14 -1
  8. fast_agent/cli/commands/go.py +3 -3
  9. fast_agent/constants.py +2 -0
  10. fast_agent/core/agent_app.py +2 -0
  11. fast_agent/core/direct_factory.py +39 -120
  12. fast_agent/core/fastagent.py +2 -2
  13. fast_agent/core/logging/listeners.py +2 -1
  14. fast_agent/history/history_exporter.py +3 -3
  15. fast_agent/interfaces.py +2 -2
  16. fast_agent/llm/fastagent_llm.py +3 -3
  17. fast_agent/llm/model_database.py +7 -1
  18. fast_agent/llm/model_factory.py +2 -3
  19. fast_agent/llm/provider/bedrock/llm_bedrock.py +1 -1
  20. fast_agent/llm/provider/google/llm_google_native.py +1 -3
  21. fast_agent/llm/provider/openai/llm_azure.py +1 -1
  22. fast_agent/llm/provider/openai/llm_openai.py +57 -8
  23. fast_agent/llm/provider/openai/llm_tensorzero_openai.py +1 -1
  24. fast_agent/llm/request_params.py +1 -1
  25. fast_agent/mcp/__init__.py +1 -2
  26. fast_agent/mcp/mcp_aggregator.py +6 -3
  27. fast_agent/mcp/prompt_message_extended.py +2 -0
  28. fast_agent/mcp/prompt_serialization.py +124 -39
  29. fast_agent/mcp/prompts/prompt_load.py +34 -32
  30. fast_agent/mcp/prompts/prompt_server.py +26 -11
  31. fast_agent/resources/setup/fastagent.config.yaml +2 -2
  32. fast_agent/types/__init__.py +3 -1
  33. fast_agent/ui/enhanced_prompt.py +111 -64
  34. fast_agent/ui/interactive_prompt.py +13 -41
  35. fast_agent/ui/rich_progress.py +12 -8
  36. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/METADATA +4 -4
  37. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/RECORD +40 -40
  38. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/WHEEL +0 -0
  39. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/entry_points.txt +0 -0
  40. {fast_agent_mcp-0.3.6.dist-info → fast_agent_mcp-0.3.8.dist-info}/licenses/LICENSE +0 -0
@@ -12,8 +12,6 @@ from fast_agent.core.logging.logger import get_logger
12
12
  from fast_agent.mcp import mime_utils, resource_utils
13
13
  from fast_agent.mcp.prompts.prompt_template import (
14
14
  PromptContent,
15
- PromptTemplate,
16
- PromptTemplateLoader,
17
15
  )
18
16
  from fast_agent.types import PromptMessageExtended
19
17
 
@@ -100,58 +98,62 @@ def create_resource_message(
100
98
  return message_class(content=embedded_resource)
101
99
 
102
100
 
103
- def load_prompt(file: Path) -> List[PromptMessage]:
101
+ def load_prompt(file: Path) -> List[PromptMessageExtended]:
104
102
  """
105
- Load a prompt from a file and return as PromptMessage objects.
103
+ Load a prompt from a file and return as PromptMessageExtended objects.
106
104
 
107
105
  The loader uses file extension to determine the format:
108
- - .json files are loaded as MCP SDK compatible GetPromptResult JSON format
109
- - All other files are loaded using the template-based delimited format
106
+ - .json files are loaded using enhanced format that preserves tool_calls, channels, etc.
107
+ - All other files are loaded using the template-based delimited format with resource loading
110
108
 
111
109
  Args:
112
110
  file: Path to the prompt file
113
111
 
114
112
  Returns:
115
- List of PromptMessage objects
113
+ List of PromptMessageExtended objects with full conversation state
116
114
  """
117
- file_str = str(file).lower()
115
+ path_str = str(file).lower()
118
116
 
119
- if file_str.endswith(".json"):
120
- # Handle JSON format as GetPromptResult
121
- import json
117
+ if path_str.endswith(".json"):
118
+ # JSON files use the serialization module directly
119
+ from fast_agent.mcp.prompt_serialization import load_messages
120
+ return load_messages(str(file))
121
+ else:
122
+ # Non-JSON files need template processing for resource loading
123
+ from fast_agent.mcp.prompts.prompt_template import PromptTemplateLoader
122
124
 
123
- from mcp.types import GetPromptResult
125
+ loader = PromptTemplateLoader()
126
+ template = loader.load_from_file(file)
124
127
 
125
- # Load JSON directly into GetPromptResult
126
- with open(file, "r", encoding="utf-8") as f:
127
- json_data = json.load(f)
128
+ # Render the template without arguments to get the messages
129
+ messages = create_messages_with_resources(
130
+ template.content_sections,
131
+ [file] # Pass the file path for resource resolution
132
+ )
133
+
134
+ # Convert to PromptMessageExtended
135
+ return PromptMessageExtended.to_extended(messages)
128
136
 
129
- # Parse as GetPromptResult object
130
- result = GetPromptResult.model_validate(json_data)
131
137
 
132
- # Return the messages directly
133
- return result.messages
134
- else:
135
- # Template-based format (delimited text)
136
- template: PromptTemplate = PromptTemplateLoader().load_from_file(file)
137
- return create_messages_with_resources(template.content_sections, [file])
138
138
 
139
139
 
140
- def load_prompt_multipart(file: Path) -> List[PromptMessageExtended]:
140
+ def load_prompt_as_get_prompt_result(file: Path):
141
141
  """
142
- Load a prompt from a file and return as PromptMessageExtended objects.
142
+ Load a prompt from a file and convert to GetPromptResult format for MCP compatibility.
143
143
 
144
- The loader uses file extension to determine the format:
145
- - .json files are loaded as MCP SDK compatible GetPromptResult JSON format
146
- - All other files are loaded using the template-based delimited format
144
+ This loses extended fields (tool_calls, channels, etc.) but provides
145
+ compatibility with MCP prompt servers.
147
146
 
148
147
  Args:
149
148
  file: Path to the prompt file
150
149
 
151
150
  Returns:
152
- List of PromptMessageExtended objects
151
+ GetPromptResult object for MCP compatibility
153
152
  """
154
- # First load as regular PromptMessage objects
153
+ from fast_agent.mcp.prompt_serialization import to_get_prompt_result
154
+
155
+ # Load with full data
155
156
  messages = load_prompt(file)
156
- # Then convert to multipart messages
157
- return PromptMessageExtended.to_extended(messages)
157
+
158
+ # Convert to GetPromptResult (loses extended fields)
159
+ return to_get_prompt_result(messages)
@@ -11,7 +11,7 @@ import base64
11
11
  import logging
12
12
  import sys
13
13
  from pathlib import Path
14
- from typing import Any, Awaitable, Callable, Dict, List, Optional
14
+ from typing import Any, Awaitable, Callable, Dict, List, Optional, Union
15
15
 
16
16
  from mcp.server.fastmcp import FastMCP
17
17
  from mcp.server.fastmcp.prompts.base import (
@@ -38,6 +38,7 @@ from fast_agent.mcp.prompts.prompt_template import (
38
38
  PromptMetadata,
39
39
  PromptTemplateLoader,
40
40
  )
41
+ from fast_agent.types import PromptMessageExtended
41
42
 
42
43
  # Configure logging
43
44
  logging.basicConfig(level=logging.ERROR)
@@ -47,13 +48,13 @@ logger = logging.getLogger("prompt_server")
47
48
  mcp = FastMCP("Prompt Server")
48
49
 
49
50
 
50
- def convert_to_fastmcp_messages(prompt_messages: List[PromptMessage]) -> List[Message]:
51
+ def convert_to_fastmcp_messages(prompt_messages: List[Union[PromptMessage, PromptMessageExtended]]) -> List[Message]:
51
52
  """
52
- Convert PromptMessage objects from prompt_load to FastMCP Message objects.
53
- This adapter prevents double-wrapping of messages.
53
+ Convert PromptMessage or PromptMessageExtended objects to FastMCP Message objects.
54
+ This adapter prevents double-wrapping of messages and handles both types.
54
55
 
55
56
  Args:
56
- prompt_messages: List of PromptMessage objects from prompt_load
57
+ prompt_messages: List of PromptMessage or PromptMessageExtended objects
57
58
 
58
59
  Returns:
59
60
  List of FastMCP Message objects
@@ -61,13 +62,27 @@ def convert_to_fastmcp_messages(prompt_messages: List[PromptMessage]) -> List[Me
61
62
  result = []
62
63
 
63
64
  for msg in prompt_messages:
64
- if msg.role == "user":
65
- result.append(UserMessage(content=msg.content))
66
- elif msg.role == "assistant":
67
- result.append(AssistantMessage(content=msg.content))
65
+ # Handle both PromptMessage and PromptMessageExtended
66
+ if hasattr(msg, 'from_multipart'):
67
+ # PromptMessageExtended - convert to regular PromptMessage format
68
+ flat_messages = msg.from_multipart()
69
+ for flat_msg in flat_messages:
70
+ if flat_msg.role == "user":
71
+ result.append(UserMessage(content=flat_msg.content))
72
+ elif flat_msg.role == "assistant":
73
+ result.append(AssistantMessage(content=flat_msg.content))
74
+ else:
75
+ logger.warning(f"Unknown message role: {flat_msg.role}, defaulting to user")
76
+ result.append(UserMessage(content=flat_msg.content))
68
77
  else:
69
- logger.warning(f"Unknown message role: {msg.role}, defaulting to user")
70
- result.append(UserMessage(content=msg.content))
78
+ # Regular PromptMessage - use directly
79
+ if msg.role == "user":
80
+ result.append(UserMessage(content=msg.content))
81
+ elif msg.role == "assistant":
82
+ result.append(AssistantMessage(content=msg.content))
83
+ else:
84
+ logger.warning(f"Unknown message role: {msg.role}, defaulting to user")
85
+ result.append(UserMessage(content=msg.content))
71
86
 
72
87
  return result
73
88
 
@@ -7,10 +7,10 @@
7
7
  # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
8
8
  # and OpenAI Models: gpt-4.1, gpt-4.1-mini, o1, o1-mini, o3-mini
9
9
  #
10
- # If not specified, defaults to "haiku".
10
+ # If not specified, defaults to "gpt-5-mini.low".
11
11
  # Can be overriden with a command line switch --model=<model>, or within the Agent constructor.
12
12
 
13
- default_model: haiku
13
+ default_model: gpt-5-mini.low
14
14
  # mcp-ui support: disabled, enabled or auto. "auto" opens the web browser on the asset automatically
15
15
  # mcp_ui_output_dir: ".fast-agent/ui" # Where to write MCP-UI HTML files (relative to CWD if not absolute)
16
16
  # mcp_ui_mode: enabled
@@ -18,7 +18,9 @@ from fast_agent.mcp.helpers.content_helpers import (
18
18
 
19
19
  # Public message model used across providers and MCP integration
20
20
  from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
21
- from fast_agent.types.llm_stop_reason import LlmStopReason
21
+
22
+ # Stop reason enum - imported directly to avoid circular dependency
23
+ from .llm_stop_reason import LlmStopReason
22
24
 
23
25
  __all__ = [
24
26
  # Enums / types
@@ -3,12 +3,13 @@ Enhanced prompt functionality with advanced prompt_toolkit features.
3
3
  """
4
4
 
5
5
  import asyncio
6
+ import json
6
7
  import os
7
8
  import shlex
8
9
  import subprocess
9
10
  import tempfile
10
11
  from importlib.metadata import version
11
- from typing import List, Optional
12
+ from typing import TYPE_CHECKING, List, Optional
12
13
 
13
14
  from prompt_toolkit import PromptSession
14
15
  from prompt_toolkit.completion import Completer, Completion, WordCompleter
@@ -20,9 +21,13 @@ from prompt_toolkit.styles import Style
20
21
  from rich import print as rich_print
21
22
 
22
23
  from fast_agent.agents.agent_types import AgentType
24
+ from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL, FAST_AGENT_REMOVED_METADATA_CHANNEL
23
25
  from fast_agent.core.exceptions import PromptExitError
24
26
  from fast_agent.llm.model_info import get_model_info
25
27
 
28
+ if TYPE_CHECKING:
29
+ from fast_agent.core.agent_app import AgentApp
30
+
26
31
  # Get the application version
27
32
  try:
28
33
  app_version = version("fast-agent-mcp")
@@ -38,6 +43,30 @@ available_agents = set()
38
43
  # Keep track of multi-line mode state
39
44
  in_multiline_mode = False
40
45
 
46
+
47
+ def _extract_alert_flags_from_meta(blocks) -> set[str]:
48
+ flags: set[str] = set()
49
+ for block in blocks or []:
50
+ text = getattr(block, "text", None)
51
+ if not text:
52
+ continue
53
+ try:
54
+ payload = json.loads(text)
55
+ except (TypeError, ValueError):
56
+ continue
57
+ if payload.get("type") != "fast-agent-removed":
58
+ continue
59
+ category = payload.get("category")
60
+ match category:
61
+ case "text":
62
+ flags.add("T")
63
+ case "document":
64
+ flags.add("D")
65
+ case "vision":
66
+ flags.add("V")
67
+ return flags
68
+
69
+
41
70
  # Track whether help text has been shown globally
42
71
  help_message_shown = False
43
72
 
@@ -45,20 +74,17 @@ help_message_shown = False
45
74
  _agent_info_shown = set()
46
75
 
47
76
 
48
- async def _display_agent_info_helper(agent_name: str, agent_provider: object) -> None:
77
+ async def _display_agent_info_helper(agent_name: str, agent_provider: "AgentApp | None") -> None:
49
78
  """Helper function to display agent information."""
50
79
  # Only show once per agent
51
80
  if agent_name in _agent_info_shown:
52
81
  return
53
82
 
54
83
  try:
55
- # Get agent info
56
- if hasattr(agent_provider, "_agent"):
57
- # This is an AgentApp - get the specific agent
58
- agent = agent_provider._agent(agent_name)
59
- else:
60
- # This is a single agent
61
- agent = agent_provider
84
+ # Get agent info from AgentApp
85
+ if agent_provider is None:
86
+ return
87
+ agent = agent_provider._agent(agent_name)
62
88
 
63
89
  # Get counts TODO -- add this to the type library or adjust the way aggregator/reporting works
64
90
  server_count = 0
@@ -148,7 +174,9 @@ async def _display_agent_info_helper(agent_name: str, agent_provider: object) ->
148
174
  pass
149
175
 
150
176
 
151
- async def _display_all_agents_with_hierarchy(available_agents: List[str], agent_provider) -> None:
177
+ async def _display_all_agents_with_hierarchy(
178
+ available_agents: List[str], agent_provider: "AgentApp | None"
179
+ ) -> None:
152
180
  """Display all agents with tree structure for workflow agents."""
153
181
  # Track which agents are children to avoid displaying them twice
154
182
  child_agents = set()
@@ -156,10 +184,9 @@ async def _display_all_agents_with_hierarchy(available_agents: List[str], agent_
156
184
  # First pass: identify all child agents
157
185
  for agent_name in available_agents:
158
186
  try:
159
- if hasattr(agent_provider, "_agent"):
160
- agent = agent_provider._agent(agent_name)
161
- else:
162
- agent = agent_provider
187
+ if agent_provider is None:
188
+ continue
189
+ agent = agent_provider._agent(agent_name)
163
190
 
164
191
  if agent.agent_type == AgentType.PARALLEL:
165
192
  if hasattr(agent, "fan_out_agents") and agent.fan_out_agents:
@@ -184,10 +211,9 @@ async def _display_all_agents_with_hierarchy(available_agents: List[str], agent_
184
211
  continue
185
212
 
186
213
  try:
187
- if hasattr(agent_provider, "_agent"):
188
- agent = agent_provider._agent(agent_name)
189
- else:
190
- agent = agent_provider
214
+ if agent_provider is None:
215
+ continue
216
+ agent = agent_provider._agent(agent_name)
191
217
 
192
218
  # Display parent agent
193
219
  await _display_agent_info_helper(agent_name, agent_provider)
@@ -202,7 +228,7 @@ async def _display_all_agents_with_hierarchy(available_agents: List[str], agent_
202
228
  continue
203
229
 
204
230
 
205
- async def _display_parallel_children(parallel_agent, agent_provider) -> None:
231
+ async def _display_parallel_children(parallel_agent, agent_provider: "AgentApp | None") -> None:
206
232
  """Display child agents of a parallel agent in tree format."""
207
233
  children = []
208
234
 
@@ -222,7 +248,7 @@ async def _display_parallel_children(parallel_agent, agent_provider) -> None:
222
248
  await _display_child_agent_info(child_agent, prefix, agent_provider)
223
249
 
224
250
 
225
- async def _display_router_children(router_agent, agent_provider) -> None:
251
+ async def _display_router_children(router_agent, agent_provider: "AgentApp | None") -> None:
226
252
  """Display child agents of a router agent in tree format."""
227
253
  children = []
228
254
 
@@ -239,7 +265,9 @@ async def _display_router_children(router_agent, agent_provider) -> None:
239
265
  await _display_child_agent_info(child_agent, prefix, agent_provider)
240
266
 
241
267
 
242
- async def _display_child_agent_info(child_agent, prefix: str, agent_provider) -> None:
268
+ async def _display_child_agent_info(
269
+ child_agent, prefix: str, agent_provider: "AgentApp | None"
270
+ ) -> None:
243
271
  """Display info for a child agent with tree prefix."""
244
272
  try:
245
273
  # Get counts for child agent
@@ -425,7 +453,9 @@ def get_text_from_editor(initial_text: str = "") -> str:
425
453
  return edited_text.strip() # Added strip() to remove trailing newlines often added by editors
426
454
 
427
455
 
428
- def create_keybindings(on_toggle_multiline=None, app=None, agent_provider=None, agent_name=None):
456
+ def create_keybindings(
457
+ on_toggle_multiline=None, app=None, agent_provider: "AgentApp | None" = None, agent_name=None
458
+ ):
429
459
  """Create custom key bindings."""
430
460
  kb = KeyBindings()
431
461
 
@@ -504,30 +534,20 @@ def create_keybindings(on_toggle_multiline=None, app=None, agent_provider=None,
504
534
  """Ctrl+Y: Copy last assistant response to clipboard."""
505
535
  if kb.agent_provider and kb.current_agent_name:
506
536
  try:
507
- # Get the agent
508
- if hasattr(kb.agent_provider, "_agent"):
509
- agent = kb.agent_provider._agent(kb.current_agent_name)
510
- else:
511
- agent = kb.agent_provider
512
-
513
- # Get message history
514
- if hasattr(agent, "_llm") and agent._llm and agent._llm.message_history:
515
- # Find last assistant message
516
- for msg in reversed(agent._llm.message_history):
517
- if msg.role == "assistant":
518
- content = msg.last_text()
519
- import pyperclip
520
-
521
- pyperclip.copy(content)
522
- rich_print("\n[green]✓ Copied to clipboard[/green]")
523
- return
524
-
525
- else:
526
- pass
537
+ # Get the agent from AgentApp
538
+ agent = kb.agent_provider._agent(kb.current_agent_name)
539
+
540
+ # Find last assistant message
541
+ for msg in reversed(agent.message_history):
542
+ if msg.role == "assistant":
543
+ content = msg.last_text()
544
+ import pyperclip
545
+
546
+ pyperclip.copy(content)
547
+ rich_print("\n[green]✓ Copied to clipboard[/green]")
548
+ return
527
549
  except Exception:
528
550
  pass
529
- else:
530
- pass
531
551
 
532
552
  return kb
533
553
 
@@ -542,7 +562,7 @@ async def get_enhanced_input(
542
562
  agent_types: dict[str, AgentType] = None,
543
563
  is_human_input: bool = False,
544
564
  toolbar_color: str = "ansiblue",
545
- agent_provider: object = None,
565
+ agent_provider: "AgentApp | None" = None,
546
566
  ) -> str:
547
567
  """
548
568
  Enhanced input with advanced prompt_toolkit features.
@@ -557,7 +577,7 @@ async def get_enhanced_input(
557
577
  agent_types: Dictionary mapping agent names to their types for display
558
578
  is_human_input: Whether this is a human input request (disables agent selection features)
559
579
  toolbar_color: Color to use for the agent name in the toolbar (default: "ansiblue")
560
- agent_provider: Optional agent provider for displaying agent info
580
+ agent_provider: Optional AgentApp for displaying agent info
561
581
 
562
582
  Returns:
563
583
  User input string
@@ -598,18 +618,22 @@ async def get_enhanced_input(
598
618
 
599
619
  shortcut_text = " | ".join(f"{key}:{action}" for key, action in shortcuts)
600
620
 
601
- # Resolve model name and TDV from the current agent if available
621
+ # Resolve model name, turn counter, and TDV from the current agent if available
602
622
  model_display = None
603
623
  tdv_segment = None
624
+ turn_count = 0
604
625
  try:
605
- agent_obj = (
606
- agent_provider._agent(agent_name)
607
- if agent_provider and hasattr(agent_provider, "_agent")
608
- else agent_provider
609
- )
610
- if agent_obj and hasattr(agent_obj, "llm") and agent_obj.llm:
611
- model_name = getattr(agent_obj.llm, "model_name", None)
612
- if model_name:
626
+ if agent_provider:
627
+ agent = agent_provider._agent(agent_name)
628
+
629
+ # Get turn count from message history
630
+ for message in agent.message_history:
631
+ if message.role == "user":
632
+ turn_count += 1
633
+
634
+ # Get model name from LLM
635
+ if agent.llm and agent.llm.model_name:
636
+ model_name = agent.llm.model_name
613
637
  # Truncate model name to max 25 characters with ellipsis
614
638
  max_len = 25
615
639
  if len(model_name) > max_len:
@@ -619,20 +643,35 @@ async def get_enhanced_input(
619
643
  model_display = model_name
620
644
 
621
645
  # Build TDV capability segment based on model database
622
- info = get_model_info(agent_obj)
646
+ info = get_model_info(agent)
623
647
  # Default to text-only if info resolution fails for any reason
624
648
  t, d, v = (True, False, False)
625
649
  if info:
626
650
  t, d, v = info.tdv_flags
627
651
 
652
+ # Check for alert flags in user messages
653
+ alert_flags: set[str] = set()
654
+ error_seen = False
655
+ for message in agent.message_history:
656
+ if message.channels:
657
+ if message.channels.get(FAST_AGENT_ERROR_CHANNEL):
658
+ error_seen = True
659
+ if message.role == "user" and message.channels:
660
+ meta_blocks = message.channels.get(FAST_AGENT_REMOVED_METADATA_CHANNEL, [])
661
+ alert_flags.update(_extract_alert_flags_from_meta(meta_blocks))
662
+
663
+ if error_seen and not alert_flags:
664
+ alert_flags.add("T")
665
+
628
666
  def _style_flag(letter: str, supported: bool) -> str:
629
667
  # Enabled uses the same color as NORMAL mode (ansigreen), disabled is dim
668
+ if letter in alert_flags:
669
+ return f"<style fg='ansired' bg='ansiblack'>{letter}</style>"
670
+
630
671
  enabled_color = "ansigreen"
631
- return (
632
- f"<style fg='{enabled_color}' bg='ansiblack'>{letter}</style>"
633
- if supported
634
- else f"<style fg='ansiblack' bg='ansiwhite'>{letter}</style>"
635
- )
672
+ if supported:
673
+ return f"<style fg='{enabled_color}' bg='ansiblack'>{letter}</style>"
674
+ return f"<style fg='ansiblack' bg='ansiwhite'>{letter}</style>"
636
675
 
637
676
  tdv_segment = f"{_style_flag('T', t)}{_style_flag('D', d)}{_style_flag('V', v)}"
638
677
  except Exception:
@@ -640,7 +679,7 @@ async def get_enhanced_input(
640
679
  model_display = None
641
680
  tdv_segment = None
642
681
 
643
- # Build dynamic middle segments: model (in green) and optional shortcuts
682
+ # Build dynamic middle segments: model (in green), turn counter, and optional shortcuts
644
683
  middle_segments = []
645
684
  if model_display:
646
685
  # Model chip + inline TDV flags
@@ -650,6 +689,10 @@ async def get_enhanced_input(
650
689
  )
651
690
  else:
652
691
  middle_segments.append(f"<style bg='ansigreen'>{model_display}</style>")
692
+
693
+ # Add turn counter (formatted as 3 digits)
694
+ middle_segments.append(f"{turn_count:03d}")
695
+
653
696
  if shortcut_text:
654
697
  middle_segments.append(shortcut_text)
655
698
  middle = " | ".join(middle_segments)
@@ -761,7 +804,9 @@ async def get_enhanced_input(
761
804
  elif cmd in ("save_history", "save"):
762
805
  # Return a structured action for the interactive loop to handle
763
806
  # Prefer programmatic saving via HistoryExporter; fall back to magic-string there if needed
764
- filename = cmd_parts[1].strip() if len(cmd_parts) > 1 and cmd_parts[1].strip() else None
807
+ filename = (
808
+ cmd_parts[1].strip() if len(cmd_parts) > 1 and cmd_parts[1].strip() else None
809
+ )
765
810
  return {"save_history": True, "filename": filename}
766
811
  elif cmd == "prompt":
767
812
  # Handle /prompt with no arguments as interactive mode
@@ -941,7 +986,9 @@ async def handle_special_commands(command, agent_app=None):
941
986
  rich_print(" /usage - Show current usage statistics")
942
987
  rich_print(" /markdown - Show last assistant message without markdown formatting")
943
988
  rich_print(" /save_history <filename> - Save current chat history to a file")
944
- rich_print(" [dim]Tip: Use a .json extension for MCP-compatible JSON; any other extension saves Markdown.[/dim]")
989
+ rich_print(
990
+ " [dim]Tip: Use a .json extension for MCP-compatible JSON; any other extension saves Markdown.[/dim]"
991
+ )
945
992
  rich_print(" @agent_name - Switch to agent")
946
993
  rich_print(" STOP - Return control back to the workflow")
947
994
  rich_print(" EXIT - Exit fast-agent, terminating any running workflows")
@@ -14,14 +14,16 @@ Usage:
14
14
  )
15
15
  """
16
16
 
17
- from typing import Awaitable, Callable, Dict, List, Mapping, Optional, Protocol, Union
17
+ from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Union
18
+
19
+ if TYPE_CHECKING:
20
+ from fast_agent.core.agent_app import AgentApp
18
21
 
19
22
  from mcp.types import Prompt, PromptMessage
20
23
  from rich import print as rich_print
21
24
 
22
25
  from fast_agent.agents.agent_types import AgentType
23
26
  from fast_agent.history.history_exporter import HistoryExporter
24
- from fast_agent.interfaces import AgentProtocol
25
27
  from fast_agent.mcp.mcp_aggregator import SEP
26
28
  from fast_agent.types import PromptMessageExtended
27
29
  from fast_agent.ui.enhanced_prompt import (
@@ -41,36 +43,6 @@ SendFunc = Callable[[Union[str, PromptMessage, PromptMessageExtended], str], Awa
41
43
  AgentGetter = Callable[[str], Optional[object]]
42
44
 
43
45
 
44
- class PromptProvider(Protocol):
45
- """Protocol for objects that can provide prompt functionality."""
46
-
47
- async def list_prompts(
48
- self, namespace: Optional[str] = None, agent_name: Optional[str] = None
49
- ) -> Mapping[str, List[Prompt]]:
50
- """List available prompts."""
51
- ...
52
-
53
- async def apply_prompt(
54
- self,
55
- prompt_name: str,
56
- prompt_title: Optional[str] = None,
57
- arguments: Optional[Dict[str, str]] = None,
58
- agent_name: Optional[str] = None,
59
- as_template: bool = False,
60
- **kwargs,
61
- ) -> str:
62
- """Apply a prompt."""
63
- ...
64
-
65
- def _agent(self, agent_name: str) -> AgentProtocol:
66
- """Return the concrete agent by name (AgentApp provides this)."""
67
- ...
68
-
69
- def _show_turn_usage(self, agent_name: str) -> None:
70
- """Display usage for a given agent after a turn."""
71
- ...
72
-
73
-
74
46
  class InteractivePrompt:
75
47
  """
76
48
  Provides interactive prompt functionality that works with any agent implementation.
@@ -91,7 +63,7 @@ class InteractivePrompt:
91
63
  send_func: SendFunc,
92
64
  default_agent: str,
93
65
  available_agents: List[str],
94
- prompt_provider: PromptProvider,
66
+ prompt_provider: "AgentApp",
95
67
  default: str = "",
96
68
  ) -> str:
97
69
  """
@@ -101,7 +73,7 @@ class InteractivePrompt:
101
73
  send_func: Function to send messages to agents
102
74
  default_agent: Name of the default agent to use
103
75
  available_agents: List of available agent names
104
- prompt_provider: Optional provider that implements list_prompts and apply_prompt
76
+ prompt_provider: AgentApp instance for accessing agents and prompts
105
77
  default: Default message to use when user presses enter
106
78
 
107
79
  Returns:
@@ -290,7 +262,7 @@ class InteractivePrompt:
290
262
  rich_print()
291
263
 
292
264
  async def _get_all_prompts(
293
- self, prompt_provider: PromptProvider, agent_name: Optional[str] = None
265
+ self, prompt_provider: "AgentApp", agent_name: Optional[str] = None
294
266
  ):
295
267
  """
296
268
  Get a list of all available prompts.
@@ -370,7 +342,7 @@ class InteractivePrompt:
370
342
  rich_print(f"[dim]{traceback.format_exc()}[/dim]")
371
343
  return []
372
344
 
373
- async def _list_prompts(self, prompt_provider: PromptProvider, agent_name: str) -> None:
345
+ async def _list_prompts(self, prompt_provider: "AgentApp", agent_name: str) -> None:
374
346
  """
375
347
  List available prompts for an agent.
376
348
 
@@ -474,7 +446,7 @@ class InteractivePrompt:
474
446
 
475
447
  async def _select_prompt(
476
448
  self,
477
- prompt_provider: PromptProvider,
449
+ prompt_provider: "AgentApp",
478
450
  agent_name: str,
479
451
  requested_name: Optional[str] = None,
480
452
  send_func: Optional[SendFunc] = None,
@@ -808,7 +780,7 @@ class InteractivePrompt:
808
780
  rich_print(f"[red]Error selecting or applying prompt: {e}[/red]")
809
781
  rich_print(f"[dim]{traceback.format_exc()}[/dim]")
810
782
 
811
- async def _list_tools(self, prompt_provider: PromptProvider, agent_name: str) -> None:
783
+ async def _list_tools(self, prompt_provider: "AgentApp", agent_name: str) -> None:
812
784
  """
813
785
  List available tools for an agent.
814
786
 
@@ -908,7 +880,7 @@ class InteractivePrompt:
908
880
  rich_print(f"[red]Error listing tools: {e}[/red]")
909
881
  rich_print(f"[dim]{traceback.format_exc()}[/dim]")
910
882
 
911
- async def _show_usage(self, prompt_provider: PromptProvider, agent_name: str) -> None:
883
+ async def _show_usage(self, prompt_provider: "AgentApp", agent_name: str) -> None:
912
884
  """
913
885
  Show usage statistics for the current agent(s) in a colorful table format.
914
886
 
@@ -930,7 +902,7 @@ class InteractivePrompt:
930
902
  except Exception as e:
931
903
  rich_print(f"[red]Error showing usage: {e}[/red]")
932
904
 
933
- async def _show_system(self, prompt_provider: PromptProvider, agent_name: str) -> None:
905
+ async def _show_system(self, prompt_provider: "AgentApp", agent_name: str) -> None:
934
906
  """
935
907
  Show the current system prompt for the agent.
936
908
 
@@ -980,7 +952,7 @@ class InteractivePrompt:
980
952
  rich_print(f"[red]Error showing system prompt: {e}[/red]")
981
953
  rich_print(f"[dim]{traceback.format_exc()}[/dim]")
982
954
 
983
- async def _show_markdown(self, prompt_provider: PromptProvider, agent_name: str) -> None:
955
+ async def _show_markdown(self, prompt_provider: "AgentApp", agent_name: str) -> None:
984
956
  """
985
957
  Show the last assistant message without markdown formatting.
986
958