fast-agent-mcp 0.3.13__py3-none-any.whl → 0.3.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of fast-agent-mcp has been flagged as potentially problematic.
Files changed (44)
  1. fast_agent/agents/llm_agent.py +59 -37
  2. fast_agent/agents/llm_decorator.py +13 -2
  3. fast_agent/agents/mcp_agent.py +21 -5
  4. fast_agent/agents/tool_agent.py +41 -29
  5. fast_agent/agents/workflow/router_agent.py +2 -1
  6. fast_agent/cli/commands/check_config.py +48 -1
  7. fast_agent/config.py +65 -2
  8. fast_agent/constants.py +3 -0
  9. fast_agent/context.py +42 -9
  10. fast_agent/core/fastagent.py +14 -1
  11. fast_agent/core/logging/listeners.py +1 -1
  12. fast_agent/core/validation.py +31 -33
  13. fast_agent/event_progress.py +2 -3
  14. fast_agent/human_input/form_fields.py +4 -1
  15. fast_agent/interfaces.py +12 -2
  16. fast_agent/llm/fastagent_llm.py +31 -0
  17. fast_agent/llm/model_database.py +2 -2
  18. fast_agent/llm/model_factory.py +8 -1
  19. fast_agent/llm/provider_key_manager.py +1 -0
  20. fast_agent/llm/provider_types.py +1 -0
  21. fast_agent/llm/request_params.py +3 -1
  22. fast_agent/mcp/mcp_aggregator.py +313 -40
  23. fast_agent/mcp/mcp_connection_manager.py +39 -9
  24. fast_agent/mcp/prompt_message_extended.py +2 -2
  25. fast_agent/mcp/skybridge.py +45 -0
  26. fast_agent/mcp/sse_tracking.py +287 -0
  27. fast_agent/mcp/transport_tracking.py +37 -3
  28. fast_agent/mcp/types.py +24 -0
  29. fast_agent/resources/examples/workflows/router.py +1 -0
  30. fast_agent/resources/setup/fastagent.config.yaml +7 -1
  31. fast_agent/ui/console_display.py +946 -84
  32. fast_agent/ui/elicitation_form.py +23 -1
  33. fast_agent/ui/enhanced_prompt.py +153 -58
  34. fast_agent/ui/interactive_prompt.py +57 -34
  35. fast_agent/ui/markdown_truncator.py +942 -0
  36. fast_agent/ui/mcp_display.py +110 -29
  37. fast_agent/ui/plain_text_truncator.py +68 -0
  38. fast_agent/ui/rich_progress.py +4 -1
  39. fast_agent/ui/streaming_buffer.py +449 -0
  40. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/METADATA +4 -3
  41. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/RECORD +44 -38
  42. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/WHEEL +0 -0
  43. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/entry_points.txt +0 -0
  44. {fast_agent_mcp-0.3.13.dist-info → fast_agent_mcp-0.3.15.dist-info}/licenses/LICENSE +0 -0
fast_agent/context.py CHANGED
@@ -1,10 +1,10 @@
-"""
-A central context object to store global state that is shared across the application.
-"""
+from __future__ import annotations
 
 import asyncio
 import concurrent.futures
+import logging
 import uuid
+from pathlib import Path
 from typing import TYPE_CHECKING, Any, Optional, Union
 
 from opentelemetry import trace
@@ -29,12 +29,18 @@ from fast_agent.mcp_server_registry import ServerRegistry
 
 if TYPE_CHECKING:
     from fast_agent.core.executor.workflow_signal import SignalWaitCallback
+    from fast_agent.mcp.mcp_connection_manager import MCPConnectionManager
 else:
     # Runtime placeholders for the types
     SignalWaitCallback = Any
+    MCPConnectionManager = Any
 
 logger = get_logger(__name__)
 
+"""
+A central context object to store global state that is shared across the application.
+"""
+
 
 class Context(BaseModel):
     """
@@ -52,6 +58,7 @@ class Context(BaseModel):
     task_registry: Optional[ActivityRegistry] = None
 
     tracer: trace.Tracer | None = None
+    _connection_manager: "MCPConnectionManager | None" = None
 
     model_config = ConfigDict(
         extra="allow",
@@ -130,16 +137,42 @@ async def configure_logger(config: "Settings") -> None:
     """
     Configure logging and tracing based on the application config.
     """
+    settings = config.logger
+
+    # Configure the standard Python logger used by LoggingListener so it respects settings.
+    python_logger = logging.getLogger("fast_agent")
+    python_logger.handlers.clear()
+    python_logger.setLevel(settings.level.upper())
+    python_logger.propagate = False
+
+    handler: logging.Handler
+    if settings.type == "console":
+        handler = logging.StreamHandler()
+    elif settings.type == "file":
+        log_path = Path(settings.path)
+        if log_path.parent:
+            log_path.parent.mkdir(parents=True, exist_ok=True)
+        handler = logging.FileHandler(log_path)
+    elif settings.type == "none":
+        handler = logging.NullHandler()
+    else:
+        # For transports that handle output elsewhere (e.g., HTTP), suppress console output.
+        handler = logging.NullHandler()
+
+    handler.setLevel(settings.level.upper())
+    handler.setFormatter(logging.Formatter("%(message)s"))
+    python_logger.addHandler(handler)
+
     # Use StreamingExclusionFilter to prevent streaming events from flooding logs
-    event_filter: EventFilter = StreamingExclusionFilter(min_level=config.logger.level)
-    logger.info(f"Configuring logger with level: {config.logger.level}")
-    transport = create_transport(settings=config.logger, event_filter=event_filter)
+    event_filter: EventFilter = StreamingExclusionFilter(min_level=settings.level)
+    logger.info(f"Configuring logger with level: {settings.level}")
+    transport = create_transport(settings=settings, event_filter=event_filter)
     await LoggingConfig.configure(
         event_filter=event_filter,
         transport=transport,
-        batch_size=config.logger.batch_size,
-        flush_interval=config.logger.flush_interval,
-        progress_display=config.logger.progress_display,
+        batch_size=settings.batch_size,
+        flush_interval=settings.flush_interval,
+        progress_display=settings.progress_display,
     )
 
 
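The rewritten `configure_logger` now mirrors the `logger` settings onto the standard Python `fast_agent` logger, choosing a handler by transport type. Below is a minimal standalone sketch of that selection logic; `LoggerSettings` is a stand-in for the values read from `config.logger`, not fast-agent's actual settings class.

```python
# Sketch of the handler-selection pattern above. LoggerSettings is a
# stand-in for config.logger; the field names mirror the diff, not the
# real Settings model.
import logging
from dataclasses import dataclass
from pathlib import Path


@dataclass
class LoggerSettings:
    type: str = "file"  # "console" | "file" | "none" | other transports
    level: str = "info"
    path: str = "logs/fast-agent.log"


def build_handler(settings: LoggerSettings) -> logging.Handler:
    if settings.type == "console":
        return logging.StreamHandler()
    if settings.type == "file":
        log_path = Path(settings.path)
        log_path.parent.mkdir(parents=True, exist_ok=True)  # create log dir lazily
        return logging.FileHandler(log_path)
    # "none", plus transports that emit elsewhere (e.g., HTTP), stay silent
    return logging.NullHandler()


python_logger = logging.getLogger("fast_agent")
handler = build_handler(LoggerSettings())
handler.setLevel("INFO")
handler.setFormatter(logging.Formatter("%(message)s"))
python_logger.addHandler(handler)
```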
fast_agent/core/fastagent.py CHANGED
@@ -293,7 +293,9 @@ class FastAgent:
         default: bool = False,
         elicitation_handler: Optional[ElicitationFnT] = None,
         api_key: str | None = None,
-    ) -> Callable[[Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]]: ...
+    ) -> Callable[
+        [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]
+    ]: ...
 
     def custom(
         self,
@@ -474,6 +476,17 @@ class FastAgent:
             # Create a wrapper with all agents for simplified access
             wrapper = AgentApp(active_agents)
 
+            # Disable streaming if parallel agents are present
+            from fast_agent.agents.agent_types import AgentType
+
+            has_parallel = any(
+                agent.agent_type == AgentType.PARALLEL for agent in active_agents.values()
+            )
+            if has_parallel:
+                cfg = self.app.context.config
+                if cfg is not None and cfg.logger is not None:
+                    cfg.logger.streaming = "none"
+
             # Handle command line options that should be processed after agent initialization
 
             # Handle --server option
fast_agent/core/logging/listeners.py CHANGED
@@ -70,7 +70,7 @@ def convert_log_event(event: Event) -> "ProgressEvent | None":
 
     # Extract streaming token count for STREAMING actions
     streaming_tokens = None
-    if progress_action == ProgressAction.STREAMING:
+    if progress_action == ProgressAction.STREAMING or progress_action == ProgressAction.THINKING:
         streaming_tokens = event_data.get("details", "")
 
     # Extract progress data for TOOL_PROGRESS actions
fast_agent/core/validation.py CHANGED
@@ -200,6 +200,34 @@ def get_dependencies(
     return deps
 
 
+def get_agent_dependencies(agent_data: dict[str, Any]) -> set[str]:
+    deps: set[str] = set()
+    agent_dependency_attribute_names = {
+        AgentType.CHAIN: ("sequence",),
+        AgentType.EVALUATOR_OPTIMIZER: ("evaluator", "generator", "eval_optimizer_agents"),
+        AgentType.ITERATIVE_PLANNER: ("child_agents",),
+        AgentType.ORCHESTRATOR: ("child_agents",),
+        AgentType.PARALLEL: ("fan_out", "fan_in", "parallel_agents"),
+        AgentType.ROUTER: ("router_agents",),
+    }
+    agent_type = agent_data["type"]
+    dependency_names = agent_dependency_attribute_names.get(agent_type, None)
+    if dependency_names is None:
+        return deps
+
+    for dependency_name in dependency_names:
+        dependency_value = agent_data.get(dependency_name)
+        if dependency_value is None:
+            continue
+        if isinstance(dependency_value, str):
+            deps.add(dependency_value)
+        else:
+            # here, we have an implicit assumption that if it is not a None or a string, then it is a list
+            deps.update(dependency_value)
+
+    return deps
+
+
 def get_dependencies_groups(
     agents_dict: Dict[str, Dict[str, Any]], allow_cycles: bool = False
 ) -> List[List[str]]:
@@ -221,39 +249,9 @@ def get_dependencies_groups(
     agent_names = list(agents_dict.keys())
 
     # Dictionary to store dependencies for each agent
-    dependencies = {name: set() for name in agent_names}
-
-    # Build the dependency graph
-    for name, agent_data in agents_dict.items():
-        agent_type = agent_data["type"]  # This is a string from config
-
-        # Note: Compare string values from config with the Enum's string value
-        if agent_type == AgentType.PARALLEL.value:
-            # Parallel agents depend on their fan-out and fan-in agents
-            dependencies[name].update(agent_data.get("parallel_agents", []))
-            # Also add explicit fan_out dependencies if present
-            if "fan_out" in agent_data:
-                dependencies[name].update(agent_data["fan_out"])
-            # Add explicit fan_in dependency if present
-            if "fan_in" in agent_data and agent_data["fan_in"]:
-                dependencies[name].add(agent_data["fan_in"])
-        elif agent_type == AgentType.CHAIN.value:
-            # Chain agents depend on the agents in their sequence
-            dependencies[name].update(agent_data.get("sequence", []))
-        elif agent_type == AgentType.ROUTER.value:
-            # Router agents depend on the agents they route to
-            dependencies[name].update(agent_data.get("router_agents", []))
-        elif agent_type == AgentType.ORCHESTRATOR.value:
-            # Orchestrator agents depend on their child agents
-            dependencies[name].update(agent_data.get("child_agents", []))
-        elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
-            # Evaluator-Optimizer agents depend on their evaluator and generator agents
-            if "evaluator" in agent_data:
-                dependencies[name].add(agent_data["evaluator"])
-            if "generator" in agent_data:
-                dependencies[name].add(agent_data["generator"])
-            # For backward compatibility - also check eval_optimizer_agents if present
-            dependencies[name].update(agent_data.get("eval_optimizer_agents", []))
+    dependencies = {
+        name: get_agent_dependencies(agent_data) for name, agent_data in agents_dict.items()
+    }
 
     # Check for cycles if not allowed
     if not allow_cycles:
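The per-type if/elif chain collapses into the table-driven `get_agent_dependencies` helper above. A usage sketch with invented agent data follows; note the helper returns an empty set for any type missing from its lookup table, and that string values are `add`ed while list values are merged.

```python
# Usage sketch for the new helper (agent data invented for illustration).
from fast_agent.agents.agent_types import AgentType
from fast_agent.core.validation import get_agent_dependencies

agents = {
    "writer": {"type": "agent"},  # not in the lookup table -> no dependencies
    "editor": {"type": "agent"},
    "pipeline": {"type": AgentType.CHAIN, "sequence": ["writer", "editor"]},
    "fanout": {
        "type": AgentType.PARALLEL,
        "fan_out": ["writer", "editor"],
        "fan_in": "editor",  # a bare string is add()-ed, lists are update()-d
    },
}

deps = {name: get_agent_dependencies(data) for name, data in agents.items()}
assert deps["pipeline"] == {"writer", "editor"}
assert deps["fanout"] == {"writer", "editor"}  # fan_in merged with fan_out
assert deps["writer"] == set()
```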
fast_agent/event_progress.py CHANGED
@@ -14,6 +14,7 @@ class ProgressAction(str, Enum):
     INITIALIZED = "Initialized"
     CHATTING = "Chatting"
     STREAMING = "Streaming"  # Special action for real-time streaming updates
+    THINKING = "Thinking"  # Special action for real-time thinking updates
     ROUTING = "Routing"
     PLANNING = "Planning"
     READY = "Ready"
@@ -53,9 +54,7 @@ class ProgressEvent(BaseModel):
         base = f"{self.action.ljust(11)}. {self.target}"
         if self.details:
             base += f" - {self.details}"
-
+
         if self.agent_name:
             base = f"[{self.agent_name}] {base}"
         return base
-
-
fast_agent/human_input/form_fields.py CHANGED
@@ -29,6 +29,8 @@ class StringField:
             schema["minLength"] = self.min_length
         if self.max_length is not None:
             schema["maxLength"] = self.max_length
+        if self.pattern is not None:
+            schema["pattern"] = self.pattern
         if self.format:
             schema["format"] = self.format
 
@@ -178,10 +180,11 @@ def string(
     default: Optional[str] = None,
     min_length: Optional[int] = None,
     max_length: Optional[int] = None,
+    pattern: Optional[str] = None,
    format: Optional[str] = None,
 ) -> StringField:
     """Create a string field."""
-    return StringField(title, description, default, min_length, max_length, format)
+    return StringField(title, description, default, min_length, max_length, pattern, format)
 
 
 def email(
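`string()` now threads an optional regex through to the JSON Schema `pattern` keyword. A small usage sketch, with invented field values and keyword arguments throughout, assuming `string()` mirrors the `StringField` constructor's `title`/`description` parameters:

```python
# Invented example of the new pattern constraint on string fields.
from fast_agent.human_input import form_fields

order_id = form_fields.string(
    "Order ID",
    description="Format: ORD- followed by six digits",
    max_length=10,
    pattern=r"^ORD-\d{6}$",
)
# The schema built by StringField should now carry, alongside maxLength:
#   "pattern": "^ORD-\\d{6}$"
```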
fast_agent/interfaces.py CHANGED
@@ -8,6 +8,7 @@ without pulling in MCP-specific code, helping to avoid circular imports.
 from typing import (
     TYPE_CHECKING,
     Any,
+    Callable,
     Dict,
     List,
     Mapping,
@@ -21,7 +22,7 @@ from typing import (
 
     from a2a.types import AgentCard
     from mcp import Tool
-    from mcp.types import GetPromptResult, Prompt, PromptMessage, ReadResourceResult
+    from mcp.types import GetPromptResult, ListToolsResult, Prompt, PromptMessage, ReadResourceResult
     from pydantic import BaseModel
     from rich.text import Text
 
@@ -79,6 +80,13 @@ class FastAgentLLMProtocol(Protocol):
         self, prompt_result: "GetPromptResult", prompt_name: str
     ) -> str: ...
 
+    def get_request_params(
+        self,
+        request_params: RequestParams | None = None,
+    ) -> RequestParams: ...
+
+    def add_stream_listener(self, listener: Callable[[str], None]) -> Callable[[], None]: ...
+
     @property
     def message_history(self) -> List[PromptMessageExtended]: ...
 
@@ -116,7 +124,7 @@ class LlmAgentProtocol(Protocol):
     def clear(self, *, clear_prompts: bool = False) -> None: ...
 
 
-class AgentProtocol(LlmAgentProtocol):
+class AgentProtocol(LlmAgentProtocol, Protocol):
     """Standard agent interface with flexible input types."""
 
     async def __call__(
@@ -190,6 +198,8 @@ class AgentProtocol(LlmAgentProtocol):
 
     async def list_mcp_tools(self, namespace: str | None = None) -> Mapping[str, List[Tool]]: ...
 
+    async def list_tools(self) -> ListToolsResult: ...
+
     async def get_resource(
         self, resource_uri: str, namespace: str | None = None
     ) -> ReadResourceResult: ...
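The `AgentProtocol` change is a correctness fix worth noting: under PEP 544, a subclass of a protocol is only itself a protocol if it explicitly lists `Protocol` in its bases; otherwise it becomes an ordinary concrete class and loses structural-typing behavior. A standalone demonstration with invented names:

```python
# Why the explicit Protocol base matters: a subclass of a Protocol is a
# protocol only if it lists Protocol again in its bases.
from typing import Protocol, runtime_checkable


@runtime_checkable
class Readable(Protocol):
    def read(self) -> str: ...


class ReadWritableWrong(Readable):  # ordinary class, NOT a protocol
    def write(self, data: str) -> None: ...


@runtime_checkable
class ReadWritable(Readable, Protocol):  # still a structural protocol
    def write(self, data: str) -> None: ...


class File:  # matches the shape, inherits from neither
    def read(self) -> str:
        return ""

    def write(self, data: str) -> None:
        pass


assert isinstance(File(), ReadWritable)  # True: structural match
assert not isinstance(File(), ReadWritableWrong)  # False: nominal check only
```

Before the fix, `AgentProtocol(LlmAgentProtocol)` was a nominal class, so implementations that merely matched its shape would not check against it.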
fast_agent/llm/fastagent_llm.py CHANGED
@@ -3,6 +3,7 @@ from contextvars import ContextVar
 from typing import (
     TYPE_CHECKING,
     Any,
+    Callable,
     Dict,
     Generic,
     List,
@@ -157,6 +158,7 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
 
         # Initialize usage tracking
         self._usage_accumulator = UsageAccumulator()
+        self._stream_listeners: set[Callable[[str], None]] = set()
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize default parameters for the LLM.
@@ -483,6 +485,8 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
         Returns:
             Updated estimated token count
         """
+        self._notify_stream_listeners(content)
+
         # Rough estimate: 1 token per 4 characters (OpenAI's typical ratio)
         text_length = len(content)
         additional_tokens = max(1, text_length // 4)
@@ -503,6 +507,33 @@ class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT
 
         return new_total
 
+    def add_stream_listener(self, listener: Callable[[str], None]) -> Callable[[], None]:
+        """
+        Register a callback invoked with streaming text chunks.
+
+        Args:
+            listener: Callable receiving the text chunk emitted by the provider.
+
+        Returns:
+            A function that removes the listener when called.
+        """
+        self._stream_listeners.add(listener)
+
+        def remove() -> None:
+            self._stream_listeners.discard(listener)
+
+        return remove
+
+    def _notify_stream_listeners(self, chunk: str) -> None:
+        """Notify registered listeners with a streaming text chunk."""
+        if not chunk:
+            return
+        for listener in list(self._stream_listeners):
+            try:
+                listener(chunk)
+            except Exception:
+                self.logger.exception("Stream listener raised an exception")
+
     def _log_chat_finished(self, model: Optional[str] = None) -> None:
         """Log a chat finished event"""
         data = {
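The new listener API returns its own detach function, so callers need not keep a separate reference for cleanup. A usage sketch follows; how the `llm` instance is obtained depends on your agent setup and is assumed here.

```python
# Usage sketch for the stream-listener API; only the register/detach
# contract is shown. `llm` is an already-constructed FastAgentLLM.
chunks: list[str] = []


def on_chunk(text: str) -> None:
    chunks.append(text)  # receives each streamed text fragment


detach = llm.add_stream_listener(on_chunk)
try:
    ...  # run a turn; chunks arrive via _notify_stream_listeners
finally:
    detach()  # the returned callable unregisters the listener
```

Note that `_notify_stream_listeners` swallows listener exceptions after logging them, so a misbehaving UI callback cannot abort the stream.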
fast_agent/llm/model_database.py CHANGED
@@ -130,11 +130,9 @@ class ModelDatabase:
         context_window=400000, max_output_tokens=128000, tokenizes=OPENAI_MULTIMODAL
     )
 
-    # TODO update to 32000
     ANTHROPIC_OPUS_4_VERSIONED = ModelParameters(
         context_window=200000, max_output_tokens=32000, tokenizes=ANTHROPIC_MULTIMODAL
     )
-    # TODO update to 64000
     ANTHROPIC_SONNET_4_VERSIONED = ModelParameters(
         context_window=200000, max_output_tokens=64000, tokenizes=ANTHROPIC_MULTIMODAL
     )
@@ -237,6 +235,8 @@
         "claude-opus-4-0": ANTHROPIC_OPUS_4_VERSIONED,
         "claude-opus-4-1": ANTHROPIC_OPUS_4_VERSIONED,
         "claude-opus-4-20250514": ANTHROPIC_OPUS_4_VERSIONED,
+        "claude-haiku-4-5-20251001": ANTHROPIC_SONNET_4_VERSIONED,
+        "claude-haiku-4-5": ANTHROPIC_SONNET_4_VERSIONED,
         # DeepSeek Models
         "deepseek-chat": DEEPSEEK_CHAT_STANDARD,
         # Google Gemini Models (vanilla aliases and versioned)
fast_agent/llm/model_factory.py CHANGED
@@ -86,6 +86,7 @@ class ModelFactory:
         "claude-sonnet-4-0": Provider.ANTHROPIC,
         "claude-sonnet-4-5-20250929": Provider.ANTHROPIC,
         "claude-sonnet-4-5": Provider.ANTHROPIC,
+        "claude-haiku-4-5": Provider.ANTHROPIC,
         "deepseek-chat": Provider.DEEPSEEK,
         "gemini-2.0-flash": Provider.GOOGLE,
         "gemini-2.5-flash-preview-05-20": Provider.GOOGLE,
@@ -109,9 +110,10 @@
         "sonnet35": "claude-3-5-sonnet-latest",
         "sonnet37": "claude-3-7-sonnet-latest",
         "claude": "claude-sonnet-4-0",
-        "haiku": "claude-3-5-haiku-latest",
+        "haiku": "claude-haiku-4-5",
         "haiku3": "claude-3-haiku-20240307",
         "haiku35": "claude-3-5-haiku-latest",
+        "hauku45": "claude-haiku-4-5",
         "opus": "claude-opus-4-1",
         "opus4": "claude-opus-4-1",
         "opus3": "claude-3-opus-latest",
@@ -317,6 +319,11 @@
             from fast_agent.llm.provider.openai.llm_groq import GroqLLM
 
             return GroqLLM
+        if provider == Provider.RESPONSES:
+            from fast_agent.llm.provider.openai.responses import ResponsesLLM
+
+            return ResponsesLLM
+
     except Exception as e:
         raise ModelConfigError(
             f"Provider '{provider.value}' is unavailable or missing dependencies: {e}"
fast_agent/llm/provider_key_manager.py CHANGED
@@ -14,6 +14,7 @@ PROVIDER_ENVIRONMENT_MAP: Dict[str, str] = {
     # default behaviour in _get_env_key_name is to capitalize the
     # provider name and suffix "_API_KEY" - so no specific mapping needed unless overriding
     "huggingface": "HF_TOKEN",
+    "responses": "OPENAI_API_KEY",  # Temporary workaround
 }
 API_KEY_HINT_TEXT = "<your-api-key-here>"
 
fast_agent/llm/provider_types.py CHANGED
@@ -31,3 +31,4 @@ class Provider(Enum):
     XAI = ("xai", "XAI")  # For xAI Grok models
     BEDROCK = ("bedrock", "Bedrock")
     GROQ = ("groq", "Groq")
+    RESPONSES = ("responses", "responses")
fast_agent/llm/request_params.py CHANGED
@@ -8,6 +8,8 @@ from mcp import SamplingMessage
 from mcp.types import CreateMessageRequestParams
 from pydantic import Field
 
+from fast_agent.constants import DEFAULT_MAX_ITERATIONS
+
 
 class RequestParams(CreateMessageRequestParams):
     """
@@ -34,7 +36,7 @@ class RequestParams(CreateMessageRequestParams):
     Agent/LLM maintains conversation history. Does not include applied Prompts
     """
 
-    max_iterations: int = 20
+    max_iterations: int = DEFAULT_MAX_ITERATIONS
     """
     The maximum number of tool calls allowed in a conversation turn
     """