fast-agent-mcp 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff compares the contents of two package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/METADATA +5 -1
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/RECORD +28 -17
- mcp_agent/agents/agent.py +46 -0
- mcp_agent/core/agent_app.py +373 -9
- mcp_agent/core/decorators.py +455 -0
- mcp_agent/core/enhanced_prompt.py +70 -4
- mcp_agent/core/factory.py +501 -0
- mcp_agent/core/fastagent.py +140 -1059
- mcp_agent/core/proxies.py +51 -11
- mcp_agent/core/validation.py +221 -0
- mcp_agent/human_input/handler.py +5 -2
- mcp_agent/mcp/mcp_aggregator.py +537 -47
- mcp_agent/mcp/mcp_connection_manager.py +13 -2
- mcp_agent/mcp_server/__init__.py +4 -0
- mcp_agent/mcp_server/agent_server.py +121 -0
- mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
- mcp_agent/resources/examples/internal/prompt_category.py +21 -0
- mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
- mcp_agent/resources/examples/internal/sizer.py +24 -0
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
- mcp_agent/resources/examples/workflows/sse.py +23 -0
- mcp_agent/ui/console_display.py +278 -0
- mcp_agent/workflows/llm/augmented_llm.py +245 -179
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
- mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/proxies.py
CHANGED
@@ -45,13 +45,28 @@ class BaseAgentProxy:
         """Generate response for a message - must be implemented by subclasses"""
         raise NotImplementedError("Subclasses must implement generate_str")

-
-
-
-
-
-
-
+    async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Use a Prompt from an MCP Server - implemented by subclasses.
+        Always returns an Assistant message.
+
+        Args:
+            prompt_name: Name of the prompt to load
+            arguments: Optional dictionary of string arguments for prompt templating
+        """
+        raise NotImplementedError("Subclasses must implement mcp-prompt")
+
+    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Apply a Prompt from an MCP Server - implemented by subclasses.
+        This is the preferred method for applying prompts.
+        Always returns an Assistant message.
+
+        Args:
+            prompt_name: Name of the prompt to apply
+            arguments: Optional dictionary of string arguments for prompt templating
+        """
+        raise NotImplementedError("Subclasses must implement apply_prompt")


 class LLMAgentProxy(BaseAgentProxy):
@@ -65,6 +80,33 @@ class LLMAgentProxy(BaseAgentProxy):
         """Forward message and all kwargs to the agent's LLM"""
         return await self._agent._llm.generate_str(message, **kwargs)

+    async def load_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Load and apply a prompt from an MCP server.
+
+        Args:
+            prompt_name: Name of the prompt to load
+            arguments: Optional dictionary of string arguments for prompt templating
+
+        Returns:
+            The assistant's response
+        """
+        return await self._agent.load_prompt(prompt_name, arguments)
+
+    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
+        """
+        Apply a prompt from an MCP server.
+        This is the preferred method for applying prompts.
+
+        Args:
+            prompt_name: Name of the prompt to apply
+            arguments: Optional dictionary of string arguments for prompt templating
+
+        Returns:
+            The assistant's response
+        """
+        return await self._agent.apply_prompt(prompt_name, arguments)
+

 class WorkflowProxy(BaseAgentProxy):
     """Proxy for workflow types that implement generate_str() directly"""
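The two hunks above add a prompt surface to the proxies: `BaseAgentProxy` gains `load_prompt()`/`apply_prompt()` stubs, and `LLMAgentProxy` forwards both calls to the wrapped agent, always returning the assistant's reply. The sketch below mirrors that forwarding pattern with stand-in classes (`FakeAgent` and `FakeLLMAgentProxy` are illustrative names, not part of the package), so it runs without fast-agent installed.

```python
# Hedged, self-contained sketch of the delegation pattern added in 0.1.5.
import asyncio


class FakeAgent:
    """Stand-in for the wrapped agent; the real one resolves the prompt via its MCP servers."""

    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
        return f"[assistant] applied '{prompt_name}' with arguments {arguments or {}}"


class FakeLLMAgentProxy:
    """Mirrors the one-line forwarding shown in the LLMAgentProxy hunk."""

    def __init__(self, agent: FakeAgent):
        self._agent = agent

    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
        # Forward to the wrapped agent, exactly as the added apply_prompt does.
        return await self._agent.apply_prompt(prompt_name, arguments)


async def main() -> None:
    proxy = FakeLLMAgentProxy(FakeAgent())
    print(await proxy.apply_prompt("summarize", {"topic": "MCP prompts"}))


asyncio.run(main())
```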
@@ -141,7 +183,7 @@ class ChainProxy(BaseAgentProxy):

         if self._cumulative:
             # Cumulative mode: each agent gets all previous responses
-            cumulative_response = f"
+            cumulative_response = f'<fastagent:response agent="{first_agent}">\n{first_response}\n</fastagent:response>'

             # Process subsequent agents with cumulative results
             for agent_name in self._sequence[1:]:
@@ -149,9 +191,7 @@ class ChainProxy(BaseAgentProxy):
                 # Pass all previous responses to next agent
                 agent_response = await proxy.generate_str(cumulative_response)
                 # Add this agent's response to the cumulative result
-                cumulative_response += (
-                    f"\n\n<{agent_name}>\n{agent_response}\n</{agent_name}>"
-                )
+                cumulative_response += f'\n\n<fastagent:response agent="{agent_name}">\n{agent_response}\n</fastagent:response>'

             return cumulative_response
         else:
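The `ChainProxy` hunks above switch the cumulative-chain format from bare `<agent_name>` tags to namespaced `<fastagent:response agent="...">` wrappers. A small illustration with hypothetical agent names and canned responses (no real `generate_str()` calls) shows what the accumulated string looks like:

```python
# Illustration only: builds the new cumulative wrapper format with made-up data.
responses = [
    ("researcher", "Found three relevant sources."),
    ("writer", "Drafted a two-paragraph summary."),
]

first_agent, first_response = responses[0]
cumulative_response = f'<fastagent:response agent="{first_agent}">\n{first_response}\n</fastagent:response>'

for agent_name, agent_response in responses[1:]:
    # Each later agent receives everything so far; its reply is appended in the same format.
    cumulative_response += f'\n\n<fastagent:response agent="{agent_name}">\n{agent_response}\n</fastagent:response>'

print(cumulative_response)
```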
mcp_agent/core/validation.py
ADDED
@@ -0,0 +1,221 @@
+"""
+Validation utilities for FastAgent configuration and dependencies.
+"""
+
+from typing import Dict, List, Any
+from mcp_agent.core.agent_types import AgentType
+from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
+from mcp_agent.core.exceptions import ServerConfigError, AgentConfigError, CircularDependencyError
+
+
+def validate_server_references(context, agents: Dict[str, Dict[str, Any]]) -> None:
+    """
+    Validate that all server references in agent configurations exist in config.
+    Raises ServerConfigError if any referenced servers are not defined.
+
+    Args:
+        context: Application context
+        agents: Dictionary of agent configurations
+    """
+    if not context.config.mcp or not context.config.mcp.servers:
+        available_servers = set()
+    else:
+        available_servers = set(context.config.mcp.servers.keys())
+
+    # Check each agent's server references
+    for name, agent_data in agents.items():
+        config = agent_data["config"]
+        if config.servers:
+            missing = [s for s in config.servers if s not in available_servers]
+            if missing:
+                raise ServerConfigError(
+                    f"Missing server configuration for agent '{name}'",
+                    f"The following servers are referenced but not defined in config: {', '.join(missing)}",
+                )
+
+
+def validate_workflow_references(agents: Dict[str, Dict[str, Any]]) -> None:
+    """
+    Validate that all workflow references point to valid agents/workflows.
+    Also validates that referenced agents have required configuration.
+    Raises AgentConfigError if any validation fails.
+
+    Args:
+        agents: Dictionary of agent configurations
+    """
+    available_components = set(agents.keys())
+
+    for name, agent_data in agents.items():
+        agent_type = agent_data["type"]
+
+        if agent_type == AgentType.PARALLEL.value:
+            # Check fan_in exists
+            fan_in = agent_data["fan_in"]
+            if fan_in not in available_components:
+                raise AgentConfigError(
+                    f"Parallel workflow '{name}' references non-existent fan_in component: {fan_in}"
+                )
+
+            # Check fan_out agents exist
+            fan_out = agent_data["fan_out"]
+            missing = [a for a in fan_out if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Parallel workflow '{name}' references non-existent fan_out components: {', '.join(missing)}"
+                )
+
+        elif agent_type == AgentType.ORCHESTRATOR.value:
+            # Check all child agents exist and are properly configured
+            child_agents = agent_data["child_agents"]
+            missing = [a for a in child_agents if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Orchestrator '{name}' references non-existent agents: {', '.join(missing)}"
+                )
+
+            # Validate child agents have required LLM configuration
+            for agent_name in child_agents:
+                child_data = agents[agent_name]
+                if child_data["type"] == AgentType.BASIC.value:
+                    # For basic agents, we'll validate LLM config during creation
+                    continue
+                # Check if it's a workflow type or has LLM capability
+                # Workflows like EvaluatorOptimizer and Parallel are valid for orchestrator
+                func = child_data["func"]
+                workflow_types = [
+                    AgentType.EVALUATOR_OPTIMIZER.value,
+                    AgentType.PARALLEL.value,
+                    AgentType.ROUTER.value,
+                    AgentType.CHAIN.value,
+                ]
+
+                if not (
+                    isinstance(func, AugmentedLLM)
+                    or child_data["type"] in workflow_types
+                    or (hasattr(func, "_llm") and func._llm is not None)
+                ):
+                    raise AgentConfigError(
+                        f"Agent '{agent_name}' used by orchestrator '{name}' lacks LLM capability",
+                        "All agents used by orchestrators must be LLM-capable (either an AugmentedLLM or have an _llm property)",
+                    )
+
+        elif agent_type == AgentType.ROUTER.value:
+            # Check all referenced agents exist
+            router_agents = agent_data["agents"]
+            missing = [a for a in router_agents if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Router '{name}' references non-existent agents: {', '.join(missing)}"
+                )
+
+        elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
+            # Check both evaluator and optimizer exist
+            evaluator = agent_data["evaluator"]
+            generator = agent_data["generator"]
+            missing = []
+            if evaluator not in available_components:
+                missing.append(f"evaluator: {evaluator}")
+            if generator not in available_components:
+                missing.append(f"generator: {generator}")
+            if missing:
+                raise AgentConfigError(
+                    f"Evaluator-Optimizer '{name}' references non-existent components: {', '.join(missing)}"
+                )
+
+        elif agent_type == AgentType.CHAIN.value:
+            # Check that all agents in the sequence exist
+            sequence = agent_data.get("sequence", agent_data.get("agents", []))
+            missing = [a for a in sequence if a not in available_components]
+            if missing:
+                raise AgentConfigError(
+                    f"Chain '{name}' references non-existent agents: {', '.join(missing)}"
+                )
+
+
+def get_dependencies(
+    name: str,
+    agents: Dict[str, Dict[str, Any]],
+    visited: set,
+    path: set,
+    agent_type: AgentType = None
+) -> List[str]:
+    """
+    Get dependencies for an agent in topological order.
+    Works for both Parallel and Chain workflows.
+
+    Args:
+        name: Name of the agent
+        agents: Dictionary of agent configurations
+        visited: Set of already visited agents
+        path: Current path for cycle detection
+        agent_type: Optional type filter (e.g., only check Parallel or Chain)
+
+    Returns:
+        List of agent names in dependency order
+
+    Raises:
+        CircularDependencyError: If circular dependency detected
+    """
+    if name in path:
+        path_str = " -> ".join(path)
+        raise CircularDependencyError(f"Path: {path_str} -> {name}")
+
+    if name in visited:
+        return []
+
+    if name not in agents:
+        return []
+
+    config = agents[name]
+
+    # Skip if not the requested type (when filtering)
+    if agent_type and config["type"] != agent_type.value:
+        return []
+
+    path.add(name)
+    deps = []
+
+    # Handle dependencies based on agent type
+    if config["type"] == AgentType.PARALLEL.value:
+        # Get dependencies from fan-out agents
+        for fan_out in config["fan_out"]:
+            deps.extend(get_dependencies(fan_out, agents, visited, path, agent_type))
+    elif config["type"] == AgentType.CHAIN.value:
+        # Get dependencies from sequence agents
+        sequence = config.get("sequence", config.get("agents", []))
+        for agent_name in sequence:
+            deps.extend(
+                get_dependencies(agent_name, agents, visited, path, agent_type)
+            )
+
+    # Add this agent after its dependencies
+    deps.append(name)
+    visited.add(name)
+    path.remove(name)
+
+    return deps
+
+
+def get_parallel_dependencies(
+    name: str,
+    agents: Dict[str, Dict[str, Any]],
+    visited: set,
+    path: set
+) -> List[str]:
+    """
+    Get dependencies for a parallel agent in topological order.
+    Legacy function that calls the more general get_dependencies.
+
+    Args:
+        name: Name of the parallel agent
+        agents: Dictionary of agent configurations
+        visited: Set of already visited agents
+        path: Current path for cycle detection
+
+    Returns:
+        List of agent names in dependency order
+
+    Raises:
+        CircularDependencyError: If circular dependency detected
+    """
+    return get_dependencies(name, agents, visited, path, AgentType.PARALLEL)
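The new `mcp_agent/core/validation.py` module centralizes reference checks and dependency resolution. Below is a minimal sketch, assuming fast-agent-mcp 0.1.5 is installed and that the agent dictionaries follow the shapes the functions above read ("type", "sequence", and so on); the agent names are made up.

```python
# Hedged sketch exercising the new validation helpers with hypothetical agents.
from mcp_agent.core.agent_types import AgentType
from mcp_agent.core.exceptions import AgentConfigError
from mcp_agent.core.validation import get_dependencies, validate_workflow_references

agents = {
    "fetch": {"type": AgentType.BASIC.value},
    "summarize": {"type": AgentType.BASIC.value},
    "pipeline": {"type": AgentType.CHAIN.value, "sequence": ["fetch", "summarize"]},
}

# Chain members resolve before the chain itself (topological order).
print(get_dependencies("pipeline", agents, visited=set(), path=set()))
# expected: ['fetch', 'summarize', 'pipeline']

# A dangling reference is rejected with AgentConfigError.
broken = {"pipeline": {"type": AgentType.CHAIN.value, "sequence": ["missing_agent"]}}
try:
    validate_workflow_references(broken)
except AgentConfigError as exc:
    print(f"rejected: {exc}")
```

Passing `agent_type=AgentType.CHAIN` (or `AgentType.PARALLEL`) restricts the traversal to nodes of that type, per the type filter shown in the code; `get_parallel_dependencies` is kept as a thin legacy wrapper around the same routine.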
mcp_agent/human_input/handler.py
CHANGED
@@ -65,8 +65,11 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputRespon
             toolbar_color="ansimagenta",
         )

-        #
-        await handle_special_commands(response)
+        # Handle special commands but ignore dictionary results as they require app context
+        command_result = await handle_special_commands(response)
+        if isinstance(command_result, dict) and "list_prompts" in command_result:
+            from rich import print as rich_print
+            rich_print("[yellow]Prompt listing not available in human input context[/yellow]")

     except KeyboardInterrupt:
         console.print("\n[yellow]Input interrupted[/yellow]")
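The handler change above inspects the result of `handle_special_commands()` and, when a dictionary asking for a prompt listing comes back, prints a notice instead of acting on it, since the listing needs app context. A hedged, self-contained sketch of that guard follows; the `handle_special_commands()` here is a stand-in (the real helper is imported from elsewhere in the package and may return richer results).

```python
# Hedged sketch of the new guard around special-command results.
import asyncio

from rich import print as rich_print


async def handle_special_commands(response: str):
    # Stand-in: return a dict for commands that need app context, False otherwise.
    if response.strip() == "/prompts":
        return {"list_prompts": True}
    return False


async def demo(response: str) -> None:
    # Handle special commands but ignore dictionary results, as they require app context.
    command_result = await handle_special_commands(response)
    if isinstance(command_result, dict) and "list_prompts" in command_result:
        rich_print("[yellow]Prompt listing not available in human input context[/yellow]")


asyncio.run(demo("/prompts"))
```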