fast-agent-mcp 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/METADATA +5 -1
  2. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/RECORD +28 -17
  3. mcp_agent/agents/agent.py +46 -0
  4. mcp_agent/core/agent_app.py +373 -9
  5. mcp_agent/core/decorators.py +455 -0
  6. mcp_agent/core/enhanced_prompt.py +70 -4
  7. mcp_agent/core/factory.py +501 -0
  8. mcp_agent/core/fastagent.py +140 -1059
  9. mcp_agent/core/proxies.py +83 -47
  10. mcp_agent/core/validation.py +221 -0
  11. mcp_agent/human_input/handler.py +5 -2
  12. mcp_agent/mcp/mcp_aggregator.py +537 -47
  13. mcp_agent/mcp/mcp_connection_manager.py +13 -2
  14. mcp_agent/mcp_server/__init__.py +4 -0
  15. mcp_agent/mcp_server/agent_server.py +121 -0
  16. mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
  17. mcp_agent/resources/examples/internal/prompt_category.py +21 -0
  18. mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
  19. mcp_agent/resources/examples/internal/sizer.py +24 -0
  20. mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
  21. mcp_agent/resources/examples/workflows/sse.py +23 -0
  22. mcp_agent/ui/console_display.py +278 -0
  23. mcp_agent/workflows/llm/augmented_llm.py +245 -179
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
  26. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/WHEEL +0 -0
  27. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/entry_points.txt +0 -0
  28. {fast_agent_mcp-0.1.3.dist-info → fast_agent_mcp-0.1.5.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/proxies.py CHANGED
@@ -45,13 +45,28 @@ class BaseAgentProxy:
         """Generate response for a message - must be implemented by subclasses"""
         raise NotImplementedError("Subclasses must implement generate_str")

-
-class AgentProxy(BaseAgentProxy):
-    """Legacy proxy for individual agent operations"""
-
-    async def generate_str(self, message: str, **kwargs) -> str:
-        """Forward only the message to app.send, ignoring kwargs for legacy compatibility"""
-        return await self._app.send(self._name, message)
+    async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Use a Prompt from an MCP Server - implemented by subclasses.
+        Always returns an Assistant message.
+
+        Args:
+            prompt_name: Name of the prompt to load
+            arguments: Optional dictionary of string arguments for prompt templating
+        """
+        raise NotImplementedError("Subclasses must implement mcp-prompt")
+
+    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Apply a Prompt from an MCP Server - implemented by subclasses.
+        This is the preferred method for applying prompts.
+        Always returns an Assistant message.
+
+        Args:
+            prompt_name: Name of the prompt to apply
+            arguments: Optional dictionary of string arguments for prompt templating
+        """
+        raise NotImplementedError("Subclasses must implement apply_prompt")


 class LLMAgentProxy(BaseAgentProxy):
@@ -65,6 +80,33 @@ class LLMAgentProxy(BaseAgentProxy):
         """Forward message and all kwargs to the agent's LLM"""
         return await self._agent._llm.generate_str(message, **kwargs)

+    async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Load and apply a prompt from an MCP server.
+
+        Args:
+            prompt_name: Name of the prompt to load
+            arguments: Optional dictionary of string arguments for prompt templating
+
+        Returns:
+            The assistant's response
+        """
+        return await self._agent.load_prompt(prompt_name, arguments)
+
+    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Apply a prompt from an MCP server.
+        This is the preferred method for applying prompts.
+
+        Args:
+            prompt_name: Name of the prompt to apply
+            arguments: Optional dictionary of string arguments for prompt templating
+
+        Returns:
+            The assistant's response
+        """
+        return await self._agent.apply_prompt(prompt_name, arguments)
+

 class WorkflowProxy(BaseAgentProxy):
     """Proxy for workflow types that implement generate_str() directly"""
@@ -119,52 +161,46 @@ class ChainProxy(BaseAgentProxy):
         self._continue_with_final = True  # Default behavior
         self._cumulative = False  # Default to sequential chaining

-    async def generate_str(self, message: str) -> str:
-        """Chain message through a sequence of agents with optional cumulative behavior"""
-        if not self._cumulative:
-            # Original sequential behavior
-            current_message = message
-            for agent_name in self._sequence:
-                proxy = self._agent_proxies[agent_name]
-                current_message = await proxy.generate_str(current_message)
-            return current_message
-        else:
-            # Cumulative behavior
-            original_message = message
-            agent_responses = {}
-
-            for agent_name in self._sequence:
-                proxy = self._agent_proxies[agent_name]
-
-                if not agent_responses:  # First agent
-                    response = await proxy.generate_str(original_message)
-                else:
-                    # Construct context with previous responses
-                    context_message = "The following request was sent to the agents:\n"
-                    context_message += f"<fastagent:request>\n{original_message}\n</fastagent:request>\n\n"
-
-                    context_message += "Previous agent responses:\n"
-
-                    for prev_name in self._sequence:
-                        if prev_name in agent_responses:
-                            prev_response = agent_responses[prev_name]
-                            context_message += f'<fastagent:response agent="{prev_name}">\n{prev_response}\n</fastagent:response>\n\n'
-
-                    context_message += f"Your task is to build upon this work to address: {original_message}"
-
-                    response = await proxy.generate_str(context_message)
-
-                agent_responses[agent_name] = response
-
-            # Format final output with ALL responses in XML format
-            final_output = "The following request was sent to the agents:\n"
-            final_output += (
-                f"<fastagent:request>\n{original_message}\n</fastagent:request>\n\n"
-            )
-
-            for agent_name in self._sequence:
-                response = agent_responses[agent_name]
-                final_output += f'<fastagent:response agent="{agent_name}">\n{response}\n</fastagent:response>\n\n'
-
-            # Return the XML-structured combination of all responses
-            return final_output.strip()
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Chain message through a sequence of agents.
+
+        For the first agent in the chain, pass all kwargs to maintain transparency.
+
+        Two modes of operation:
+        1. Sequential (default): Each agent receives only the output of the previous agent
+        2. Cumulative: Each agent receives all previous agent responses concatenated
+        """
+        if not self._sequence:
+            return message
+
+        # Process the first agent (same for both modes)
+        first_agent = self._sequence[0]
+        first_proxy = self._agent_proxies[first_agent]
+        first_response = await first_proxy.generate_str(message, **kwargs)
+
+        if len(self._sequence) == 1:
+            return first_response
+
+        if self._cumulative:
+            # Cumulative mode: each agent gets all previous responses
+            cumulative_response = f'<fastagent:response agent="{first_agent}">\n{first_response}\n</fastagent:response>'
+
+            # Process subsequent agents with cumulative results
+            for agent_name in self._sequence[1:]:
+                proxy = self._agent_proxies[agent_name]
+                # Pass all previous responses to next agent
+                agent_response = await proxy.generate_str(cumulative_response)
+                # Add this agent's response to the cumulative result
+                cumulative_response += f'\n\n<fastagent:response agent="{agent_name}">\n{agent_response}\n</fastagent:response>'
+
+            return cumulative_response
+        else:
+            # Sequential chaining (original behavior)
+            current_message = first_response
+
+            # For subsequent agents, just pass the message from previous agent
+            for agent_name in self._sequence[1:]:
+                proxy = self._agent_proxies[agent_name]
+                current_message = await proxy.generate_str(current_message)
+
+            return current_message
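The rewritten ChainProxy.generate_str keeps the original sequential behaviour (each agent sees only the previous agent's output) and adds a cumulative mode in which replies accumulate inside <fastagent:response> blocks. A self-contained sketch of the same control flow, using a hypothetical EchoProxy stand-in rather than real agent proxies:

```python
import asyncio


class EchoProxy:
    """Hypothetical stand-in for an agent proxy: labels whatever it receives."""

    def __init__(self, name: str) -> None:
        self.name = name

    async def generate_str(self, message: str, **kwargs) -> str:
        return f"[{self.name} saw {len(message)} chars]"


async def demo() -> None:
    sequence = ["research", "write", "review"]
    proxies = {name: EchoProxy(name) for name in sequence}

    # Sequential mode: each agent receives only the previous agent's output.
    message = "draft a report"
    for name in sequence:
        message = await proxies[name].generate_str(message)
    print("sequential ->", message)

    # Cumulative mode: replies are wrapped in <fastagent:response> tags and
    # concatenated, so later agents see everything produced so far.
    first = sequence[0]
    first_reply = await proxies[first].generate_str("draft a report")
    cumulative = f'<fastagent:response agent="{first}">\n{first_reply}\n</fastagent:response>'
    for name in sequence[1:]:
        reply = await proxies[name].generate_str(cumulative)
        cumulative += f'\n\n<fastagent:response agent="{name}">\n{reply}\n</fastagent:response>'
    print("cumulative ->", cumulative)


asyncio.run(demo())
```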
mcp_agent/core/validation.py ADDED
@@ -0,0 +1,221 @@
+"""
+Validation utilities for FastAgent configuration and dependencies.
+"""
+
+from typing import Dict, List, Any
+from mcp_agent.core.agent_types import AgentType
+from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
+from mcp_agent.core.exceptions import ServerConfigError, AgentConfigError, CircularDependencyError
+
+
+def validate_server_references(context, agents: Dict[str, Dict[str, Any]]) -> None:
+    """
+    Validate that all server references in agent configurations exist in config.
+    Raises ServerConfigError if any referenced servers are not defined.
+
+    Args:
+        context: Application context
+        agents: Dictionary of agent configurations
+    """
+    if not context.config.mcp or not context.config.mcp.servers:
+        available_servers = set()
+    else:
+        available_servers = set(context.config.mcp.servers.keys())
+
+    # Check each agent's server references
+    for name, agent_data in agents.items():
+        config = agent_data["config"]
+        if config.servers:
+            missing = [s for s in config.servers if s not in available_servers]
+            if missing:
+                raise ServerConfigError(
+                    f"Missing server configuration for agent '{name}'",
+                    f"The following servers are referenced but not defined in config: {', '.join(missing)}",
+                )
+
+
+def validate_workflow_references(agents: Dict[str, Dict[str, Any]]) -> None:
+    """
+    Validate that all workflow references point to valid agents/workflows.
+    Also validates that referenced agents have required configuration.
+    Raises AgentConfigError if any validation fails.
+
+    Args:
+        agents: Dictionary of agent configurations
+    """
+    available_components = set(agents.keys())
+
+    for name, agent_data in agents.items():
+        agent_type = agent_data["type"]
+
+        if agent_type == AgentType.PARALLEL.value:
+            # Check fan_in exists
+            fan_in = agent_data["fan_in"]
+            if fan_in not in available_components:
+                raise AgentConfigError(
+                    f"Parallel workflow '{name}' references non-existent fan_in component: {fan_in}"
+                )
+
+            # Check fan_out agents exist
+            fan_out = agent_data["fan_out"]
+            missing = [a for a in fan_out if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Parallel workflow '{name}' references non-existent fan_out components: {', '.join(missing)}"
+                )
+
+        elif agent_type == AgentType.ORCHESTRATOR.value:
+            # Check all child agents exist and are properly configured
+            child_agents = agent_data["child_agents"]
+            missing = [a for a in child_agents if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Orchestrator '{name}' references non-existent agents: {', '.join(missing)}"
+                )
+
+            # Validate child agents have required LLM configuration
+            for agent_name in child_agents:
+                child_data = agents[agent_name]
+                if child_data["type"] == AgentType.BASIC.value:
+                    # For basic agents, we'll validate LLM config during creation
+                    continue
+                # Check if it's a workflow type or has LLM capability
+                # Workflows like EvaluatorOptimizer and Parallel are valid for orchestrator
+                func = child_data["func"]
+                workflow_types = [
+                    AgentType.EVALUATOR_OPTIMIZER.value,
+                    AgentType.PARALLEL.value,
+                    AgentType.ROUTER.value,
+                    AgentType.CHAIN.value,
+                ]
+
+                if not (
+                    isinstance(func, AugmentedLLM)
+                    or child_data["type"] in workflow_types
+                    or (hasattr(func, "_llm") and func._llm is not None)
+                ):
+                    raise AgentConfigError(
+                        f"Agent '{agent_name}' used by orchestrator '{name}' lacks LLM capability",
+                        "All agents used by orchestrators must be LLM-capable (either an AugmentedLLM or have an _llm property)",
+                    )
+
+        elif agent_type == AgentType.ROUTER.value:
+            # Check all referenced agents exist
+            router_agents = agent_data["agents"]
+            missing = [a for a in router_agents if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Router '{name}' references non-existent agents: {', '.join(missing)}"
+                )
+
+        elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
+            # Check both evaluator and optimizer exist
+            evaluator = agent_data["evaluator"]
+            generator = agent_data["generator"]
+            missing = []
+            if evaluator not in available_components:
+                missing.append(f"evaluator: {evaluator}")
+            if generator not in available_components:
+                missing.append(f"generator: {generator}")
+            if missing:
+                raise AgentConfigError(
+                    f"Evaluator-Optimizer '{name}' references non-existent components: {', '.join(missing)}"
+                )
+
+        elif agent_type == AgentType.CHAIN.value:
+            # Check that all agents in the sequence exist
+            sequence = agent_data.get("sequence", agent_data.get("agents", []))
+            missing = [a for a in sequence if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Chain '{name}' references non-existent agents: {', '.join(missing)}"
+                )
+
+
+def get_dependencies(
+    name: str,
+    agents: Dict[str, Dict[str, Any]],
+    visited: set,
+    path: set,
+    agent_type: AgentType = None
+) -> List[str]:
+    """
+    Get dependencies for an agent in topological order.
+    Works for both Parallel and Chain workflows.
+
+    Args:
+        name: Name of the agent
+        agents: Dictionary of agent configurations
+        visited: Set of already visited agents
+        path: Current path for cycle detection
+        agent_type: Optional type filter (e.g., only check Parallel or Chain)
+
+    Returns:
+        List of agent names in dependency order
+
+    Raises:
+        CircularDependencyError: If circular dependency detected
+    """
+    if name in path:
+        path_str = " -> ".join(path)
+        raise CircularDependencyError(f"Path: {path_str} -> {name}")
+
+    if name in visited:
+        return []
+
+    if name not in agents:
+        return []
+
+    config = agents[name]
+
+    # Skip if not the requested type (when filtering)
+    if agent_type and config["type"] != agent_type.value:
+        return []
+
+    path.add(name)
+    deps = []
+
+    # Handle dependencies based on agent type
+    if config["type"] == AgentType.PARALLEL.value:
+        # Get dependencies from fan-out agents
+        for fan_out in config["fan_out"]:
+            deps.extend(get_dependencies(fan_out, agents, visited, path, agent_type))
+    elif config["type"] == AgentType.CHAIN.value:
+        # Get dependencies from sequence agents
+        sequence = config.get("sequence", config.get("agents", []))
+        for agent_name in sequence:
+            deps.extend(
+                get_dependencies(agent_name, agents, visited, path, agent_type)
+            )
+
+    # Add this agent after its dependencies
+    deps.append(name)
+    visited.add(name)
+    path.remove(name)
+
+    return deps
+
+
+def get_parallel_dependencies(
+    name: str,
+    agents: Dict[str, Dict[str, Any]],
+    visited: set,
+    path: set
+) -> List[str]:
+    """
+    Get dependencies for a parallel agent in topological order.
+    Legacy function that calls the more general get_dependencies.
+
+    Args:
+        name: Name of the parallel agent
+        agents: Dictionary of agent configurations
+        visited: Set of already visited agents
+        path: Current path for cycle detection
+
+    Returns:
+        List of agent names in dependency order
+
+    Raises:
+        CircularDependencyError: If circular dependency detected
+    """
+    return get_dependencies(name, agents, visited, path, AgentType.PARALLEL)
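The new validation module is driven by the factory with per-agent dictionaries keyed by name. A small sketch of exercising the helpers directly, using an illustrative registry that carries only the fields the validators read (entries produced by the real decorators are richer):

```python
from mcp_agent.core.agent_types import AgentType
from mcp_agent.core.validation import get_dependencies, validate_workflow_references

# Illustrative registry: each entry has a "type" plus whatever fields that
# workflow type uses ("sequence" for chains).
agents = {
    "fetch": {"type": AgentType.BASIC.value},
    "summarise": {"type": AgentType.BASIC.value},
    "pipeline": {
        "type": AgentType.CHAIN.value,
        "sequence": ["fetch", "summarise"],
    },
}

# Raises AgentConfigError if the chain references an unknown agent.
validate_workflow_references(agents)

# Dependency-ordered creation list; a cycle raises CircularDependencyError.
order = get_dependencies("pipeline", agents, visited=set(), path=set())
print(order)  # ['fetch', 'summarise', 'pipeline']
```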
mcp_agent/human_input/handler.py CHANGED
@@ -65,8 +65,11 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputRespon
             toolbar_color="ansimagenta",
         )

-        # if response and (response.startswith("/") or response.startswith("@")):
-        await handle_special_commands(response)
+        # Handle special commands but ignore dictionary results as they require app context
+        command_result = await handle_special_commands(response)
+        if isinstance(command_result, dict) and "list_prompts" in command_result:
+            from rich import print as rich_print
+            rich_print("[yellow]Prompt listing not available in human input context[/yellow]")

     except KeyboardInterrupt:
         console.print("\n[yellow]Input interrupted[/yellow]")