fast-agent-mcp 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +59 -371
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +3 -1
- mcp_agent/cli/commands/bootstrap.py +18 -7
- mcp_agent/cli/commands/setup.py +12 -4
- mcp_agent/cli/main.py +1 -1
- mcp_agent/cli/terminal.py +1 -1
- mcp_agent/config.py +24 -35
- mcp_agent/context.py +3 -1
- mcp_agent/context_dependent.py +3 -1
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +15 -20
- mcp_agent/core/fastagent.py +151 -337
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +19 -11
- mcp_agent/core/prompt.py +6 -2
- mcp_agent/core/validation.py +89 -16
- mcp_agent/executor/decorator_registry.py +6 -2
- mcp_agent/executor/temporal.py +35 -11
- mcp_agent/executor/workflow_signal.py +8 -2
- mcp_agent/human_input/handler.py +3 -1
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
- mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
- mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
- mcp_agent/logging/logger.py +2 -2
- mcp_agent/mcp/gen_client.py +9 -3
- mcp_agent/mcp/interfaces.py +67 -45
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +12 -4
- mcp_agent/mcp/mcp_agent_server.py +3 -1
- mcp_agent/mcp/mcp_aggregator.py +124 -93
- mcp_agent/mcp/mcp_connection_manager.py +21 -7
- mcp_agent/mcp/prompt_message_multipart.py +59 -1
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +20 -13
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +15 -5
- mcp_agent/mcp/prompts/prompt_server.py +154 -87
- mcp_agent/mcp/prompts/prompt_template.py +26 -35
- mcp_agent/mcp/resource_utils.py +3 -1
- mcp_agent/mcp/sampling.py +24 -15
- mcp_agent/mcp_server/agent_server.py +8 -5
- mcp_agent/mcp_server_registry.py +22 -9
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
- mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
- mcp_agent/resources/examples/internal/agent.py +4 -2
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/prompting/image_server.py +3 -1
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +27 -7
- fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
- mcp_agent/core/agent_app.py +0 -570
- mcp_agent/core/agent_utils.py +0 -69
- mcp_agent/core/decorators.py +0 -448
- mcp_agent/core/factory.py +0 -422
- mcp_agent/core/proxies.py +0 -278
- mcp_agent/core/types.py +0 -22
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -114
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
- mcp_agent/resources/examples/researcher/researcher.py +0 -39
- mcp_agent/resources/examples/workflows/chaining.py +0 -45
- mcp_agent/resources/examples/workflows/evaluator.py +0 -79
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -26
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
- mcp_agent/resources/examples/workflows/parallel.py +0 -79
- mcp_agent/resources/examples/workflows/router.py +0 -54
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -19
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -58
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -37
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -320
- mcp_agent/workflows/parallel/fan_out.py +0 -181
- mcp_agent/workflows/parallel/parallel_llm.py +0 -149
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -338
- mcp_agent/workflows/router/router_embedding.py +0 -226
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -304
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -292
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
- /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
- /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
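The bulk of this release is a package restructure: the old mcp_agent.workflows.* tree is retired in favour of mcp_agent.llm (model factory, providers, converters) and mcp_agent.agents.workflow (chain, parallel, router, orchestrator, evaluator-optimizer agents). Below is a minimal, hypothetical import shim for downstream code; the module paths follow the renames listed above, but this is a sketch, not a compatibility layer shipped by the package.

# Hypothetical import shim based on the renames above.
# Not an official compatibility layer; verify against the 0.2.0 source.
try:
    # 0.2.0 layout: LLM plumbing lives under mcp_agent.llm
    from mcp_agent.llm import model_factory
    from mcp_agent.llm.providers import augmented_llm_openai
except ImportError:
    # 0.1.13 layout: the same modules lived under mcp_agent.workflows.llm
    from mcp_agent.workflows.llm import model_factory
    from mcp_agent.workflows.llm import augmented_llm_openai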
mcp_agent/core/agent_app.py
DELETED
@@ -1,570 +0,0 @@
-"""
-Main application wrapper for interacting with agents.
-"""
-
-from typing import TYPE_CHECKING, Dict, Optional, Union
-
-from mcp_agent.app import MCPApp
-
-# Import proxies directly - they handle their own circular imports
-from mcp_agent.core.proxies import (
-    BaseAgentProxy,
-    ChainProxy,
-    LLMAgentProxy,
-    RouterProxy,
-    WorkflowProxy,
-)
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.progress_display import progress_display
-from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
-    EvaluatorOptimizerLLM,
-)
-from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
-from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
-
-# Handle possible circular imports with types
-if TYPE_CHECKING:
-    from mcp_agent.core.types import ProxyDict
-else:
-    ProxyDict = Dict[str, BaseAgentProxy]
-
-
-class AgentApp:
-    """Main application wrapper"""
-
-    def __init__(self, app: MCPApp, agents: ProxyDict) -> None:
-        self._app = app
-        self._agents = agents
-        # Optional: set default agent for direct calls
-        self._default = next(iter(agents)) if agents else None
-
-    async def send_prompt(self, prompt: PromptMessageMultipart, agent_name: Optional[str] = None) -> str:
-        """
-        Send a PromptMessageMultipart to an agent
-
-        Args:
-            prompt: The PromptMessageMultipart to send
-            agent_name: The name of the agent to send to (uses default if None)
-
-        Returns:
-            The agent's response as a string
-        """
-        target = agent_name or self._default
-        if not target:
-            raise ValueError("No default agent available")
-
-        if target not in self._agents:
-            raise ValueError(f"No agent named '{target}'")
-
-        proxy = self._agents[target]
-        return await proxy.send_prompt(prompt)
-
-    async def send(
-        self,
-        message: Union[str, PromptMessageMultipart] = None,
-        agent_name: Optional[str] = None,
-    ) -> str:
-        """
-        Send a message to the default agent or specified agent
-
-        Args:
-            message: Either a string message or a PromptMessageMultipart object
-            agent_name: The name of the agent to send to (uses default if None)
-
-        Returns:
-            The agent's response as a string
-        """
-        target = agent_name or self._default
-        if not target:
-            raise ValueError("No default agent available")
-
-        if target not in self._agents:
-            raise ValueError(f"No agent named '{target}'")
-
-        proxy = self._agents[target]
-        return await proxy.send(message)
-
-    async def apply_prompt(
-        self,
-        prompt_name: str,
-        arguments: Optional[dict[str, str]] = None,
-        agent_name: Optional[str] = None,
-    ) -> str:
-        """
-        Apply an MCP Server Prompt by name and return the assistant's response
-
-        Args:
-            prompt_name: The name of the prompt to apply
-            arguments: Optional dictionary of string arguments to pass to the prompt template
-            agent_name: The name of the agent to use (uses default if None)
-
-        Returns:
-            The assistant's response as a string
-        """
-        target = agent_name or self._default
-        if not target:
-            raise ValueError("No default agent available")
-
-        if target not in self._agents:
-            raise ValueError(f"No agent named '{target}'")
-
-        proxy = self._agents[target]
-        return await proxy.apply_prompt(prompt_name, arguments)
-
-    async def with_resource(
-        self,
-        prompt_content: Union[str, PromptMessageMultipart],
-        server_name: str,
-        resource_name: str,
-        agent_name: Optional[str] = None,
-    ) -> str:
-        """
-        Create a prompt with the given content and resource, then send it to the agent.
-
-        Args:
-            prompt_content: Either a string message or an existing PromptMessageMultipart
-            server_name: Name of the MCP server to retrieve the resource from
-            resource_name: Name or URI of the resource to retrieve
-            agent_name: The name of the agent to use (uses default if None)
-
-        Returns:
-            The agent's response as a string
-        """
-        target = agent_name or self._default
-        if not target:
-            raise ValueError("No default agent available")
-
-        if target not in self._agents:
-            raise ValueError(f"No agent named '{target}'")
-
-        proxy = self._agents[target]
-        return await proxy.with_resource(prompt_content, server_name, resource_name)
-
-    async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
-        """
-        Interactive prompt for sending messages with advanced features.
-
-        Args:
-            agent_name: Optional target agent name (uses default if not specified)
-            default: Default message to use when user presses enter
-        """
-        from mcp_agent.core.enhanced_prompt import (
-            get_enhanced_input,
-            handle_special_commands,
-        )
-
-        agent = agent_name or self._default
-
-        if agent not in self._agents:
-            raise ValueError(f"No agent named '{agent}'")
-
-        # Pass all available agent names for auto-completion
-        available_agents = list(self._agents.keys())
-
-        # Create agent_types dictionary mapping agent names to their types
-        agent_types = {}
-        for name, proxy in self._agents.items():
-            # Determine agent type based on the proxy type
-            if isinstance(proxy, LLMAgentProxy):
-                # Convert AgentType.BASIC.value ("agent") to "Agent"
-                agent_types[name] = "Agent"
-            elif isinstance(proxy, RouterProxy):
-                agent_types[name] = "Router"
-            elif isinstance(proxy, ChainProxy):
-                agent_types[name] = "Chain"
-            elif isinstance(proxy, WorkflowProxy):
-                # For workflow proxies, check the workflow type
-                workflow = proxy._workflow
-                if isinstance(workflow, Orchestrator):
-                    agent_types[name] = "Orchestrator"
-                elif isinstance(workflow, ParallelLLM):
-                    agent_types[name] = "Parallel"
-                elif isinstance(workflow, EvaluatorOptimizerLLM):
-                    agent_types[name] = "Evaluator"
-                else:
-                    agent_types[name] = "Workflow"
-
-        result = ""
-        while True:
-            with progress_display.paused():
-                # Use the enhanced input method with advanced features
-                user_input = await get_enhanced_input(
-                    agent_name=agent,
-                    default=default,
-                    show_default=(default != ""),
-                    show_stop_hint=True,
-                    multiline=False,  # Default to single-line mode
-                    available_agent_names=available_agents,
-                    syntax=None,  # Can enable syntax highlighting for code input
-                    agent_types=agent_types,  # Pass agent types for display
-                )
-
-            # Handle special commands
-            command_result = await handle_special_commands(user_input, self)
-
-            # Check if we should switch agents
-            if isinstance(command_result, dict):
-                if "switch_agent" in command_result:
-                    agent = command_result["switch_agent"]
-                    continue
-                elif "list_prompts" in command_result:
-                    # Handle listing of prompts
-                    from rich import print as rich_print
-
-                    try:
-                        # Check if we have any agents with aggregator capabilities
-                        found_prompts = False
-                        for agent_name, agent_proxy in self._agents.items():
-                            # Check if agent has an mcp_aggregator (agent instance)
-                            if hasattr(agent_proxy, "_agent") and hasattr(agent_proxy._agent, "list_prompts"):
-                                rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
-                                prompt_servers = await agent_proxy._agent.list_prompts()
-
-                                if prompt_servers:
-                                    found_prompts = True
-                                    for (
-                                        server_name,
-                                        prompts_info,
-                                    ) in prompt_servers.items():
-                                        if prompts_info and hasattr(prompts_info, "prompts") and prompts_info.prompts:
-                                            rich_print(f"\n[bold cyan]{server_name}:[/bold cyan]")
-                                            for prompt in prompts_info.prompts:
-                                                rich_print(f"  {prompt.name}")
-                                        elif isinstance(prompts_info, list) and prompts_info:
-                                            rich_print(f"\n[bold cyan]{server_name}:[/bold cyan]")
-                                            for prompt in prompts_info:
-                                                if isinstance(prompt, dict) and "name" in prompt:
-                                                    rich_print(f"  {prompt['name']}")
-                                                else:
-                                                    rich_print(f"  {prompt}")
-
-                        if not found_prompts:
-                            rich_print("[yellow]No prompts available[/yellow]")
-                    except Exception as e:
-                        rich_print(f"[red]Error listing prompts: {e}[/red]")
-                    continue
-                elif "select_prompt" in command_result:
-                    from rich import print as rich_print
-                    from rich.console import Console
-                    from rich.table import Table
-
-                    console = Console()
-
-                    # Get the current agent proxy
-                    current_proxy = self._agents[agent]
-
-                    # Check if the agent has prompt capabilities
-                    if not hasattr(current_proxy, "_agent") or not hasattr(current_proxy._agent, "list_prompts"):
-                        rich_print(f"[red]Current agent '{agent}' does not support prompts[/red]")
-                        continue
-
-                    try:
-                        # Create a list to store prompt data for selection
-                        all_prompts = []
-
-                        # Get prompts from the current agent
-                        rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent}[/cyan]...[/bold]")
-                        prompt_servers = await current_proxy._agent.list_prompts()
-
-                        if not prompt_servers:
-                            rich_print("[yellow]No prompts available for this agent[/yellow]")
-                            continue
-
-                        # Process retrieved prompts
-                        for server_name, prompts_info in prompt_servers.items():
-                            # Skip servers with no prompts
-                            if not prompts_info:
-                                continue
-
-                            # Extract prompts from the response
-                            prompts = []
-                            if hasattr(prompts_info, "prompts"):
-                                prompts = prompts_info.prompts
-                            elif isinstance(prompts_info, list):
-                                prompts = prompts_info
-
-                            # Process each prompt
-                            for prompt in prompts:
-                                # Basic prompt information
-                                prompt_name = getattr(prompt, "name", "Unknown")
-                                description = getattr(prompt, "description", "No description")
-
-                                # Extract argument information
-                                arg_names = []
-                                required_args = []
-                                optional_args = []
-                                arg_descriptions = {}
-
-                                # Get arguments list from prompt (MCP SDK Prompt.arguments)
-                                arguments = getattr(prompt, "arguments", None)
-                                if arguments:
-                                    for arg in arguments:
-                                        # Each arg is a PromptArgument with name and required fields
-                                        name = getattr(arg, "name", None)
-                                        if name:
-                                            arg_names.append(name)
-
-                                            # Store description if available
-                                            description = getattr(arg, "description", None)
-                                            if description:
-                                                arg_descriptions[name] = description
-
-                                            # Check if required (default to False per MCP spec)
-                                            if getattr(arg, "required", False):
-                                                required_args.append(name)
-                                            else:
-                                                optional_args.append(name)
-
-                                # Create a namespaced version with the server
-                                namespaced_name = f"{server_name}-{prompt_name}"
-
-                                # Add to our collection
-                                all_prompts.append(
-                                    {
-                                        "server": server_name,
-                                        "name": prompt_name,
-                                        "namespaced_name": namespaced_name,
-                                        "description": description,
-                                        "arg_count": len(arg_names),
-                                        "arg_names": arg_names,
-                                        "required_args": required_args,
-                                        "optional_args": optional_args,
-                                        "arg_descriptions": arg_descriptions,
-                                    }
-                                )
-
-                        # If no prompts were found
-                        if not all_prompts:
-                            rich_print("[yellow]No prompts available for this agent[/yellow]")
-                            continue
-
-                        # Sort prompts by server then name
-                        all_prompts.sort(key=lambda p: (p["server"], p["name"]))
-
-                        # Check if a specific prompt was requested
-                        if "prompt_name" in command_result and command_result["prompt_name"]:
-                            requested_name = command_result["prompt_name"]
-                            # Find the prompt in our list (either by name or namespaced name)
-                            matching_prompts = [p for p in all_prompts if p["name"] == requested_name or p["namespaced_name"] == requested_name]
-
-                            if not matching_prompts:
-                                rich_print(f"[red]Prompt '{requested_name}' not found[/red]")
-                                rich_print("[yellow]Available prompts:[/yellow]")
-                                for p in all_prompts:
-                                    rich_print(f"  {p['namespaced_name']}")
-                                continue
-
-                            # If we found exactly one match, use it
-                            if len(matching_prompts) == 1:
-                                selected_prompt = matching_prompts[0]
-                            else:
-                                # If multiple matches, show them and ask user to be more specific
-                                rich_print(f"[yellow]Multiple prompts match '{requested_name}':[/yellow]")
-                                for i, p in enumerate(matching_prompts):
-                                    rich_print(f"  {i + 1}. {p['namespaced_name']} - {p['description']}")
-
-                                # Ask user to select one
-                                from mcp_agent.core.enhanced_prompt import (
-                                    get_selection_input,
-                                )
-
-                                selection = await get_selection_input("Enter prompt number to select: ", default="1")
-
-                                try:
-                                    idx = int(selection) - 1
-                                    if 0 <= idx < len(matching_prompts):
-                                        selected_prompt = matching_prompts[idx]
-                                    else:
-                                        rich_print("[red]Invalid selection[/red]")
-                                        continue
-                                except ValueError:
-                                    rich_print("[red]Invalid input, please enter a number[/red]")
-                                    continue
-                        else:
-                            # Display prompt selection UI
-                            table = Table(title="Available MCP Prompts")
-                            table.add_column("#", justify="right", style="cyan")
-                            table.add_column("Server", style="green")
-                            table.add_column("Prompt Name", style="bright_blue")
-                            table.add_column("Description")
-                            table.add_column("Args", justify="center")
-
-                            # Add all prompts to the table
-                            for i, prompt in enumerate(all_prompts):
-                                # Get argument counts
-                                required_args = prompt["required_args"]
-                                optional_args = prompt["optional_args"]
-
-                                # Format args column nicely
-                                if required_args and optional_args:
-                                    args_display = f"[bold]{len(required_args)}[/bold]+{len(optional_args)}"
-                                elif required_args:
-                                    args_display = f"[bold]{len(required_args)}[/bold]"
-                                elif optional_args:
-                                    args_display = f"{len(optional_args)} opt"
-                                else:
-                                    args_display = "0"
-
-                                table.add_row(
-                                    str(i + 1),
-                                    prompt["server"],
-                                    prompt["name"],
-                                    prompt["description"] or "No description",
-                                    args_display,
-                                )
-
-                            console.print(table)
-                            prompt_names = [str(i + 1) for i in range(len(all_prompts))]
-
-                            # Ask user to select a prompt
-                            from mcp_agent.core.enhanced_prompt import (
-                                get_selection_input,
-                            )
-
-                            selection = await get_selection_input(
-                                "Enter prompt number to select (or press Enter to cancel): ",
-                                options=prompt_names,
-                                allow_cancel=True,
-                            )
-
-                            # Make cancellation easier
-                            if not selection or selection.strip() == "":
-                                rich_print("[yellow]Prompt selection cancelled[/yellow]")
-                                continue
-
-                            try:
-                                idx = int(selection) - 1
-                                if 0 <= idx < len(all_prompts):
-                                    selected_prompt = all_prompts[idx]
-                                else:
-                                    rich_print("[red]Invalid selection[/red]")
-                                    continue
-                            except ValueError:
-                                rich_print("[red]Invalid input, please enter a number[/red]")
-                                continue
-
-                        # Get our prompt arguments
-                        required_args = selected_prompt["required_args"]
-                        optional_args = selected_prompt["optional_args"]
-                        arg_descriptions = selected_prompt.get("arg_descriptions", {})
-
-                        # Always initialize arg_values
-                        arg_values = {}
-
-                        # Show argument info if we have any
-                        if required_args or optional_args:
-                            # Display information about the arguments
-                            if required_args and optional_args:
-                                rich_print(
-                                    f"\n[bold]Prompt [cyan]{selected_prompt['name']}[/cyan] requires {len(required_args)} arguments and has {len(optional_args)} optional arguments:[/bold]"
-                                )
-                            elif required_args:
-                                rich_print(f"\n[bold]Prompt [cyan]{selected_prompt['name']}[/cyan] requires {len(required_args)} arguments:[/bold]")
-                            elif optional_args:
-                                rich_print(f"\n[bold]Prompt [cyan]{selected_prompt['name']}[/cyan] has {len(optional_args)} optional arguments:[/bold]")
-
-                            # Collect required arguments
-                            for arg_name in required_args:
-                                # Get description if available
-                                description = arg_descriptions.get(arg_name, "")
-
-                                # Collect required argument value
-                                from mcp_agent.core.enhanced_prompt import (
-                                    get_argument_input,
-                                )
-
-                                arg_value = await get_argument_input(
-                                    arg_name=arg_name,
-                                    description=description,
-                                    required=True,
-                                )
-                                # Add to arg_values if a value was provided
-                                if arg_value is not None:
-                                    arg_values[arg_name] = arg_value
-
-                            # Only include non-empty values for optional arguments
-                            if optional_args:
-                                # Collect optional arguments
-                                for arg_name in optional_args:
-                                    # Get description if available
-                                    description = arg_descriptions.get(arg_name, "")
-
-                                    from mcp_agent.core.enhanced_prompt import (
-                                        get_argument_input,
-                                    )
-
-                                    arg_value = await get_argument_input(
-                                        arg_name=arg_name,
-                                        description=description,
-                                        required=False,
-                                    )
-                                    # Only include non-empty values for optional arguments
-                                    if arg_value:
-                                        arg_values[arg_name] = arg_value
-
-                        # Apply the prompt with or without arguments
-                        rich_print(f"\n[bold]Applying prompt [cyan]{selected_prompt['namespaced_name']}[/cyan]...[/bold]")
-
-                        # Call apply_prompt on the agent - always pass arg_values (empty dict if no args)
-                        await current_proxy._agent.apply_prompt(selected_prompt["namespaced_name"], arg_values)
-
-                    except Exception as e:
-                        import traceback
-
-                        rich_print(f"[red]Error selecting or applying prompt: {e}[/red]")
-                        rich_print(f"[dim]{traceback.format_exc()}[/dim]")
-                    continue
-
-            # Skip further processing if command was handled
-            if command_result:
-                continue
-
-            if user_input.upper() == "STOP":
-                return result
-            if user_input == "":
-                continue
-
-            result = await self.send(user_input, agent)
-
-            # Check if current agent is a chain that should continue with final agent
-            if agent_types.get(agent) == "Chain":
-                proxy = self._agents[agent]
-                if isinstance(proxy, ChainProxy) and proxy._continue_with_final:
-                    # Get the last agent in the sequence
-                    last_agent = proxy._sequence[-1]
-                    # Switch to that agent for the next iteration
-                    agent = last_agent
-
-        return result
-
-    def __getattr__(self, name: str) -> BaseAgentProxy:
-        """Support: agent.researcher"""
-        if name not in self._agents:
-            raise AttributeError(f"No agent named '{name}'")
-        return self._agents[name]
-
-    def __getitem__(self, name: str) -> BaseAgentProxy:
-        """Support: agent['researcher']"""
-        if name not in self._agents:
-            raise KeyError(f"No agent named '{name}'")
-        return self._agents[name]
-
-    async def __call__(
-        self,
-        message: Optional[Union[str, PromptMessageMultipart]] = None,
-        agent_name: Optional[str] = None,
-    ) -> str:
-        """
-        Support: agent('message') or agent(Prompt.user('message'))
-
-        Args:
-            message: Either a string message or a PromptMessageMultipart object
-            agent_name: The name of the agent to use (uses default if None)
-
-        Returns:
-            The agent's response as a string
-        """
-        target = agent_name or self._default
-        if not target:
-            raise ValueError("No default agent available")
-        return await self.send(message, target)
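For reference, the removed AgentApp wrapper is apparently superseded by core/direct_agent_app.py and core/interactive_prompt.py in the file list above. The sketch below shows how the 0.1.13 wrapper was driven, based only on the signatures in the deleted file; "researcher" and "daily-briefing" are illustrative names, not real agents or prompts.

# Usage sketch for the removed 0.1.13 AgentApp, based only on the signatures above.
async def demo(agents: "AgentApp") -> str:
    await agents.send("Summarise the report", agent_name="researcher")
    await agents("Summarise the report")  # __call__ forwards to send()
    await agents["researcher"].send("Follow up")  # __getitem__ returns the agent proxy
    return await agents.apply_prompt("daily-briefing", {"topic": "MCP"})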
mcp_agent/core/agent_utils.py
DELETED
@@ -1,69 +0,0 @@
-"""
-Utility functions for agent operations.
-"""
-
-from typing import TYPE_CHECKING, List
-
-from mcp_agent.event_progress import ProgressAction
-
-# Handle circular imports
-if TYPE_CHECKING:
-    from mcp_agent.core.proxies import BaseAgentProxy
-    from mcp_agent.core.types import AgentOrWorkflow, ProxyDict
-else:
-    from mcp_agent.core.proxies import BaseAgentProxy
-
-    # Define minimal types for runtime
-    AgentOrWorkflow = object  # Simple placeholder
-    ProxyDict = dict  # Simple placeholder
-
-
-def unwrap_proxy(proxy: BaseAgentProxy) -> AgentOrWorkflow:
-    """
-    Unwrap a proxy to get the underlying agent or workflow instance.
-
-    Args:
-        proxy: The proxy object to unwrap
-
-    Returns:
-        The underlying Agent or workflow instance
-    """
-    from mcp_agent.core.proxies import ChainProxy, LLMAgentProxy
-
-    if isinstance(proxy, LLMAgentProxy):
-        return proxy._agent
-    elif isinstance(proxy, ChainProxy):
-        # Return the ChainProxy itself as the workflow
-        return proxy
-    return proxy._workflow
-
-
-def get_agent_instances(agent_names: List[str], active_agents: ProxyDict) -> List[AgentOrWorkflow]:
-    """
-    Get list of actual agent/workflow instances from a list of names.
-
-    Args:
-        agent_names: List of agent names to look up
-        active_agents: Dictionary of active agent proxies
-
-    Returns:
-        List of unwrapped agent/workflow instances
-    """
-    return [unwrap_proxy(active_agents[name]) for name in agent_names]
-
-
-def log_agent_load(app, agent_name: str) -> None:
-    """
-    Log agent loading event to application logger.
-
-    Args:
-        app: The application instance
-        agent_name: Name of the agent being loaded
-    """
-    app._logger.info(
-        f"Loaded {agent_name}",
-        data={
-            "progress_action": ProgressAction.LOADED,
-            "agent_name": agent_name,
-        },
-    )