fast-agent-mcp 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/METADATA +7 -1
  2. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/RECORD +28 -17
  3. mcp_agent/agents/agent.py +46 -0
  4. mcp_agent/core/agent_app.py +373 -9
  5. mcp_agent/core/decorators.py +455 -0
  6. mcp_agent/core/enhanced_prompt.py +71 -5
  7. mcp_agent/core/factory.py +501 -0
  8. mcp_agent/core/fastagent.py +143 -1059
  9. mcp_agent/core/proxies.py +71 -14
  10. mcp_agent/core/validation.py +221 -0
  11. mcp_agent/human_input/handler.py +5 -2
  12. mcp_agent/mcp/mcp_aggregator.py +537 -47
  13. mcp_agent/mcp/mcp_connection_manager.py +13 -2
  14. mcp_agent/mcp_server/__init__.py +4 -0
  15. mcp_agent/mcp_server/agent_server.py +121 -0
  16. mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
  17. mcp_agent/resources/examples/internal/prompt_category.py +21 -0
  18. mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
  19. mcp_agent/resources/examples/internal/sizer.py +24 -0
  20. mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
  21. mcp_agent/resources/examples/workflows/sse.py +23 -0
  22. mcp_agent/ui/console_display.py +278 -0
  23. mcp_agent/workflows/llm/augmented_llm.py +245 -179
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
  26. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/WHEEL +0 -0
  27. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/entry_points.txt +0 -0
  28. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/proxies.py CHANGED
@@ -28,30 +28,62 @@ class BaseAgentProxy:
28
28
  self._name = name
29
29
 
30
30
  async def __call__(self, message: Optional[str] = None) -> str:
31
- """Allow: agent.researcher('message')"""
31
+ """Allow: agent.researcher('message') or just agent.researcher()"""
32
+ if message is None:
33
+ # When called with no arguments, use prompt() to open the interactive interface
34
+ return await self.prompt()
32
35
  return await self.send(message)
33
36
 
34
37
  async def send(self, message: Optional[str] = None) -> str:
35
38
  """Allow: agent.researcher.send('message')"""
36
39
  if message is None:
40
+ # For consistency with agent(), use prompt() to open the interactive interface
37
41
  return await self.prompt()
38
42
  return await self.generate_str(message)
39
43
 
40
44
  async def prompt(self, default_prompt: str = "") -> str:
41
45
  """Allow: agent.researcher.prompt()"""
42
- return await self._app.prompt(self._name, default_prompt)
46
+ from mcp_agent.core.agent_app import AgentApp
47
+
48
+ # First check if _app is directly an AgentApp
49
+ if isinstance(self._app, AgentApp):
50
+ return await self._app.prompt(self._name, default_prompt)
51
+
52
+ # If not, check if it's an MCPApp with an _agent_app attribute
53
+ if hasattr(self._app, "_agent_app"):
54
+ agent_app = self._app._agent_app
55
+ if agent_app:
56
+ return await agent_app.prompt(self._name, default_prompt)
57
+
58
+ # If we can't find an AgentApp, return an error message
59
+ return "ERROR: Cannot prompt() - AgentApp not found"
43
60
 
44
61
  async def generate_str(self, message: str) -> str:
45
62
  """Generate response for a message - must be implemented by subclasses"""
46
63
  raise NotImplementedError("Subclasses must implement generate_str")
47
64
 
48
-
49
- class AgentProxy(BaseAgentProxy):
50
- """Legacy proxy for individual agent operations"""
51
-
52
- async def generate_str(self, message: str, **kwargs) -> str:
53
- """Forward only the message to app.send, ignoring kwargs for legacy compatibility"""
54
- return await self._app.send(self._name, message)
65
+ async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
66
+ """
67
+ Use a Prompt from an MCP Server - implemented by subclasses.
68
+ Always returns an Assistant message.
69
+
70
+ Args:
71
+ prompt_name: Name of the prompt to load
72
+ arguments: Optional dictionary of string arguments for prompt templating
73
+ """
74
+ raise NotImplementedError("Subclasses must implement mcp-prompt")
75
+
76
+ async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
77
+ """
78
+ Apply a Prompt from an MCP Server - implemented by subclasses.
79
+ This is the preferred method for applying prompts.
80
+ Always returns an Assistant message.
81
+
82
+ Args:
83
+ prompt_name: Name of the prompt to apply
84
+ arguments: Optional dictionary of string arguments for prompt templating
85
+ """
86
+ raise NotImplementedError("Subclasses must implement apply_prompt")
55
87
 
56
88
 
57
89
  class LLMAgentProxy(BaseAgentProxy):
@@ -65,6 +97,33 @@ class LLMAgentProxy(BaseAgentProxy):
65
97
  """Forward message and all kwargs to the agent's LLM"""
66
98
  return await self._agent._llm.generate_str(message, **kwargs)
67
99
 
100
+ async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
101
+ """
102
+ Load and apply a prompt from an MCP server.
103
+
104
+ Args:
105
+ prompt_name: Name of the prompt to load
106
+ arguments: Optional dictionary of string arguments for prompt templating
107
+
108
+ Returns:
109
+ The assistant's response
110
+ """
111
+ return await self._agent.load_prompt(prompt_name, arguments)
112
+
113
+ async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
114
+ """
115
+ Apply a prompt from an MCP server.
116
+ This is the preferred method for applying prompts.
117
+
118
+ Args:
119
+ prompt_name: Name of the prompt to apply
120
+ arguments: Optional dictionary of string arguments for prompt templating
121
+
122
+ Returns:
123
+ The assistant's response
124
+ """
125
+ return await self._agent.apply_prompt(prompt_name, arguments)
126
+
68
127
 
69
128
  class WorkflowProxy(BaseAgentProxy):
70
129
  """Proxy for workflow types that implement generate_str() directly"""
@@ -141,7 +200,7 @@ class ChainProxy(BaseAgentProxy):
141
200
 
142
201
  if self._cumulative:
143
202
  # Cumulative mode: each agent gets all previous responses
144
- cumulative_response = f"<{first_agent}>\n{first_response}\n</{first_agent}>"
203
+ cumulative_response = f'<fastagent:response agent="{first_agent}">\n{first_response}\n</fastagent:response>'
145
204
 
146
205
  # Process subsequent agents with cumulative results
147
206
  for agent_name in self._sequence[1:]:
@@ -149,9 +208,7 @@ class ChainProxy(BaseAgentProxy):
149
208
  # Pass all previous responses to next agent
150
209
  agent_response = await proxy.generate_str(cumulative_response)
151
210
  # Add this agent's response to the cumulative result
152
- cumulative_response += (
153
- f"\n\n<{agent_name}>\n{agent_response}\n</{agent_name}>"
154
- )
211
+ cumulative_response += f'\n\n<fastagent:response agent="{agent_name}">\n{agent_response}\n</fastagent:response>'
155
212
 
156
213
  return cumulative_response
157
214
  else:
@@ -163,4 +220,4 @@ class ChainProxy(BaseAgentProxy):
163
220
  proxy = self._agent_proxies[agent_name]
164
221
  current_message = await proxy.generate_str(current_message)
165
222
 
166
- return current_message
223
+ return current_message
@@ -0,0 +1,221 @@
1
+ """
2
+ Validation utilities for FastAgent configuration and dependencies.
3
+ """
4
+
5
+ from typing import Dict, List, Any
6
+ from mcp_agent.core.agent_types import AgentType
7
+ from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
8
+ from mcp_agent.core.exceptions import ServerConfigError, AgentConfigError, CircularDependencyError
9
+
10
+
11
+ def validate_server_references(context, agents: Dict[str, Dict[str, Any]]) -> None:
12
+ """
13
+ Validate that all server references in agent configurations exist in config.
14
+ Raises ServerConfigError if any referenced servers are not defined.
15
+
16
+ Args:
17
+ context: Application context
18
+ agents: Dictionary of agent configurations
19
+ """
20
+ if not context.config.mcp or not context.config.mcp.servers:
21
+ available_servers = set()
22
+ else:
23
+ available_servers = set(context.config.mcp.servers.keys())
24
+
25
+ # Check each agent's server references
26
+ for name, agent_data in agents.items():
27
+ config = agent_data["config"]
28
+ if config.servers:
29
+ missing = [s for s in config.servers if s not in available_servers]
30
+ if missing:
31
+ raise ServerConfigError(
32
+ f"Missing server configuration for agent '{name}'",
33
+ f"The following servers are referenced but not defined in config: {', '.join(missing)}",
34
+ )
35
+
36
+
37
+ def validate_workflow_references(agents: Dict[str, Dict[str, Any]]) -> None:
38
+ """
39
+ Validate that all workflow references point to valid agents/workflows.
40
+ Also validates that referenced agents have required configuration.
41
+ Raises AgentConfigError if any validation fails.
42
+
43
+ Args:
44
+ agents: Dictionary of agent configurations
45
+ """
46
+ available_components = set(agents.keys())
47
+
48
+ for name, agent_data in agents.items():
49
+ agent_type = agent_data["type"]
50
+
51
+ if agent_type == AgentType.PARALLEL.value:
52
+ # Check fan_in exists
53
+ fan_in = agent_data["fan_in"]
54
+ if fan_in not in available_components:
55
+ raise AgentConfigError(
56
+ f"Parallel workflow '{name}' references non-existent fan_in component: {fan_in}"
57
+ )
58
+
59
+ # Check fan_out agents exist
60
+ fan_out = agent_data["fan_out"]
61
+ missing = [a for a in fan_out if a not in available_components]
62
+ if missing:
63
+ raise AgentConfigError(
64
+ f"Parallel workflow '{name}' references non-existent fan_out components: {', '.join(missing)}"
65
+ )
66
+
67
+ elif agent_type == AgentType.ORCHESTRATOR.value:
68
+ # Check all child agents exist and are properly configured
69
+ child_agents = agent_data["child_agents"]
70
+ missing = [a for a in child_agents if a not in available_components]
71
+ if missing:
72
+ raise AgentConfigError(
73
+ f"Orchestrator '{name}' references non-existent agents: {', '.join(missing)}"
74
+ )
75
+
76
+ # Validate child agents have required LLM configuration
77
+ for agent_name in child_agents:
78
+ child_data = agents[agent_name]
79
+ if child_data["type"] == AgentType.BASIC.value:
80
+ # For basic agents, we'll validate LLM config during creation
81
+ continue
82
+ # Check if it's a workflow type or has LLM capability
83
+ # Workflows like EvaluatorOptimizer and Parallel are valid for orchestrator
84
+ func = child_data["func"]
85
+ workflow_types = [
86
+ AgentType.EVALUATOR_OPTIMIZER.value,
87
+ AgentType.PARALLEL.value,
88
+ AgentType.ROUTER.value,
89
+ AgentType.CHAIN.value,
90
+ ]
91
+
92
+ if not (
93
+ isinstance(func, AugmentedLLM)
94
+ or child_data["type"] in workflow_types
95
+ or (hasattr(func, "_llm") and func._llm is not None)
96
+ ):
97
+ raise AgentConfigError(
98
+ f"Agent '{agent_name}' used by orchestrator '{name}' lacks LLM capability",
99
+ "All agents used by orchestrators must be LLM-capable (either an AugmentedLLM or have an _llm property)",
100
+ )
101
+
102
+ elif agent_type == AgentType.ROUTER.value:
103
+ # Check all referenced agents exist
104
+ router_agents = agent_data["agents"]
105
+ missing = [a for a in router_agents if a not in available_components]
106
+ if missing:
107
+ raise AgentConfigError(
108
+ f"Router '{name}' references non-existent agents: {', '.join(missing)}"
109
+ )
110
+
111
+ elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
112
+ # Check both evaluator and optimizer exist
113
+ evaluator = agent_data["evaluator"]
114
+ generator = agent_data["generator"]
115
+ missing = []
116
+ if evaluator not in available_components:
117
+ missing.append(f"evaluator: {evaluator}")
118
+ if generator not in available_components:
119
+ missing.append(f"generator: {generator}")
120
+ if missing:
121
+ raise AgentConfigError(
122
+ f"Evaluator-Optimizer '{name}' references non-existent components: {', '.join(missing)}"
123
+ )
124
+
125
+ elif agent_type == AgentType.CHAIN.value:
126
+ # Check that all agents in the sequence exist
127
+ sequence = agent_data.get("sequence", agent_data.get("agents", []))
128
+ missing = [a for a in sequence if a not in available_components]
129
+ if missing:
130
+ raise AgentConfigError(
131
+ f"Chain '{name}' references non-existent agents: {', '.join(missing)}"
132
+ )
133
+
134
+
135
+ def get_dependencies(
136
+ name: str,
137
+ agents: Dict[str, Dict[str, Any]],
138
+ visited: set,
139
+ path: set,
140
+ agent_type: AgentType = None
141
+ ) -> List[str]:
142
+ """
143
+ Get dependencies for an agent in topological order.
144
+ Works for both Parallel and Chain workflows.
145
+
146
+ Args:
147
+ name: Name of the agent
148
+ agents: Dictionary of agent configurations
149
+ visited: Set of already visited agents
150
+ path: Current path for cycle detection
151
+ agent_type: Optional type filter (e.g., only check Parallel or Chain)
152
+
153
+ Returns:
154
+ List of agent names in dependency order
155
+
156
+ Raises:
157
+ CircularDependencyError: If circular dependency detected
158
+ """
159
+ if name in path:
160
+ path_str = " -> ".join(path)
161
+ raise CircularDependencyError(f"Path: {path_str} -> {name}")
162
+
163
+ if name in visited:
164
+ return []
165
+
166
+ if name not in agents:
167
+ return []
168
+
169
+ config = agents[name]
170
+
171
+ # Skip if not the requested type (when filtering)
172
+ if agent_type and config["type"] != agent_type.value:
173
+ return []
174
+
175
+ path.add(name)
176
+ deps = []
177
+
178
+ # Handle dependencies based on agent type
179
+ if config["type"] == AgentType.PARALLEL.value:
180
+ # Get dependencies from fan-out agents
181
+ for fan_out in config["fan_out"]:
182
+ deps.extend(get_dependencies(fan_out, agents, visited, path, agent_type))
183
+ elif config["type"] == AgentType.CHAIN.value:
184
+ # Get dependencies from sequence agents
185
+ sequence = config.get("sequence", config.get("agents", []))
186
+ for agent_name in sequence:
187
+ deps.extend(
188
+ get_dependencies(agent_name, agents, visited, path, agent_type)
189
+ )
190
+
191
+ # Add this agent after its dependencies
192
+ deps.append(name)
193
+ visited.add(name)
194
+ path.remove(name)
195
+
196
+ return deps
197
+
198
+
199
+ def get_parallel_dependencies(
200
+ name: str,
201
+ agents: Dict[str, Dict[str, Any]],
202
+ visited: set,
203
+ path: set
204
+ ) -> List[str]:
205
+ """
206
+ Get dependencies for a parallel agent in topological order.
207
+ Legacy function that calls the more general get_dependencies.
208
+
209
+ Args:
210
+ name: Name of the parallel agent
211
+ agents: Dictionary of agent configurations
212
+ visited: Set of already visited agents
213
+ path: Current path for cycle detection
214
+
215
+ Returns:
216
+ List of agent names in dependency order
217
+
218
+ Raises:
219
+ CircularDependencyError: If circular dependency detected
220
+ """
221
+ return get_dependencies(name, agents, visited, path, AgentType.PARALLEL)
@@ -65,8 +65,11 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputRespon
65
65
  toolbar_color="ansimagenta",
66
66
  )
67
67
 
68
- # if response and (response.startswith("/") or response.startswith("@")):
69
- await handle_special_commands(response)
68
+ # Handle special commands but ignore dictionary results as they require app context
69
+ command_result = await handle_special_commands(response)
70
+ if isinstance(command_result, dict) and "list_prompts" in command_result:
71
+ from rich import print as rich_print
72
+ rich_print("[yellow]Prompt listing not available in human input context[/yellow]")
70
73
 
71
74
  except KeyboardInterrupt:
72
75
  console.print("\n[yellow]Input interrupted[/yellow]")