fast-agent-mcp 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +59 -371
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +3 -1
- mcp_agent/cli/commands/bootstrap.py +18 -7
- mcp_agent/cli/commands/setup.py +12 -4
- mcp_agent/cli/main.py +1 -1
- mcp_agent/cli/terminal.py +1 -1
- mcp_agent/config.py +24 -35
- mcp_agent/context.py +3 -1
- mcp_agent/context_dependent.py +3 -1
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +15 -20
- mcp_agent/core/fastagent.py +151 -337
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +19 -11
- mcp_agent/core/prompt.py +6 -2
- mcp_agent/core/validation.py +89 -16
- mcp_agent/executor/decorator_registry.py +6 -2
- mcp_agent/executor/temporal.py +35 -11
- mcp_agent/executor/workflow_signal.py +8 -2
- mcp_agent/human_input/handler.py +3 -1
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
- mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
- mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
- mcp_agent/logging/logger.py +2 -2
- mcp_agent/mcp/gen_client.py +9 -3
- mcp_agent/mcp/interfaces.py +67 -45
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +12 -4
- mcp_agent/mcp/mcp_agent_server.py +3 -1
- mcp_agent/mcp/mcp_aggregator.py +124 -93
- mcp_agent/mcp/mcp_connection_manager.py +21 -7
- mcp_agent/mcp/prompt_message_multipart.py +59 -1
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +20 -13
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +15 -5
- mcp_agent/mcp/prompts/prompt_server.py +154 -87
- mcp_agent/mcp/prompts/prompt_template.py +26 -35
- mcp_agent/mcp/resource_utils.py +3 -1
- mcp_agent/mcp/sampling.py +24 -15
- mcp_agent/mcp_server/agent_server.py +8 -5
- mcp_agent/mcp_server_registry.py +22 -9
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
- mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
- mcp_agent/resources/examples/internal/agent.py +4 -2
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/prompting/image_server.py +3 -1
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +27 -7
- fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
- mcp_agent/core/agent_app.py +0 -570
- mcp_agent/core/agent_utils.py +0 -69
- mcp_agent/core/decorators.py +0 -448
- mcp_agent/core/factory.py +0 -422
- mcp_agent/core/proxies.py +0 -278
- mcp_agent/core/types.py +0 -22
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -114
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
- mcp_agent/resources/examples/researcher/researcher.py +0 -39
- mcp_agent/resources/examples/workflows/chaining.py +0 -45
- mcp_agent/resources/examples/workflows/evaluator.py +0 -79
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -26
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
- mcp_agent/resources/examples/workflows/parallel.py +0 -79
- mcp_agent/resources/examples/workflows/router.py +0 -54
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -19
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -58
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -37
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -320
- mcp_agent/workflows/parallel/fan_out.py +0 -181
- mcp_agent/workflows/parallel/parallel_llm.py +0 -149
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -338
- mcp_agent/workflows/router/router_embedding.py +0 -226
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -304
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -292
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
- /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
- /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
mcp_agent/core/interactive_prompt.py
ADDED
@@ -0,0 +1,424 @@
+"""
+Interactive prompt functionality for agents.
+
+This module provides interactive command-line functionality for agents,
+extracted from the original AgentApp implementation to support the new DirectAgentApp.
+
+Usage:
+    prompt = InteractivePrompt()
+    await prompt.prompt_loop(
+        send_func=agent_app.send,
+        default_agent="default_agent",
+        available_agents=["agent1", "agent2"],
+        apply_prompt_func=agent_app.apply_prompt
+    )
+"""
+
+from typing import Dict, List, Optional
+
+from rich import print as rich_print
+from rich.console import Console
+from rich.table import Table
+
+from mcp_agent.core.enhanced_prompt import (
+    get_argument_input,
+    get_enhanced_input,
+    get_selection_input,
+    handle_special_commands,
+)
+from mcp_agent.progress_display import progress_display
+
+
+class InteractivePrompt:
+    """
+    Provides interactive prompt functionality that works with any agent implementation.
+    This is extracted from the original AgentApp implementation to support DirectAgentApp.
+    """
+
+    def __init__(self, agent_types: Optional[Dict[str, str]] = None) -> None:
+        """
+        Initialize the interactive prompt.
+
+        Args:
+            agent_types: Dictionary mapping agent names to their types for display
+        """
+        self.agent_types = agent_types or {}
+
+    async def prompt_loop(
+        self,
+        send_func,
+        default_agent: str,
+        available_agents: List[str],
+        apply_prompt_func=None,
+        list_prompts_func=None,
+        default: str = "",
+    ) -> str:
+        """
+        Start an interactive prompt session.
+
+        Args:
+            send_func: Function to send messages to agents (signature: async (message, agent_name))
+            default_agent: Name of the default agent to use
+            available_agents: List of available agent names
+            apply_prompt_func: Optional function to apply prompts (signature: async (name, args, agent))
+            list_prompts_func: Optional function to list available prompts (signature: async (agent_name))
+            default: Default message to use when user presses enter
+
+        Returns:
+            The result of the interactive session
+        """
+        agent = default_agent
+        if not agent:
+            if available_agents:
+                agent = available_agents[0]
+            else:
+                raise ValueError("No default agent available")
+
+        if agent not in available_agents:
+            raise ValueError(f"No agent named '{agent}'")
+
+        # Create agent_types dictionary if not provided
+        available_agents_set = set(available_agents)
+
+        result = ""
+        while True:
+            with progress_display.paused():
+                # Use the enhanced input method with advanced features
+                user_input = await get_enhanced_input(
+                    agent_name=agent,
+                    default=default,
+                    show_default=(default != ""),
+                    show_stop_hint=True,
+                    multiline=False, # Default to single-line mode
+                    available_agent_names=available_agents,
+                    agent_types=self.agent_types, # Pass agent types for display
+                )
+
+                # Handle special commands - pass "True" to enable agent switching
+                command_result = await handle_special_commands(user_input, True)
+
+                # Check if we should switch agents
+                if isinstance(command_result, dict):
+                    if "switch_agent" in command_result:
+                        new_agent = command_result["switch_agent"]
+                        if new_agent in available_agents_set:
+                            agent = new_agent
+                            continue
+                        else:
+                            rich_print(f"[red]Agent '{new_agent}' not found[/red]")
+                            continue
+                    elif "list_prompts" in command_result and list_prompts_func:
+                        # Use the list_prompts_func directly
+                        await self._list_prompts(list_prompts_func, agent)
+                        continue
+                    elif "select_prompt" in command_result and (list_prompts_func and apply_prompt_func):
+                        # Handle prompt selection, using both list_prompts and apply_prompt
+                        prompt_name = command_result.get("prompt_name")
+                        await self._select_prompt(list_prompts_func, apply_prompt_func, agent, prompt_name)
+                        continue
+
+                # Skip further processing if command was handled
+                if command_result:
+                    continue
+
+                if user_input.upper() == "STOP":
+                    return result
+                if user_input == "":
+                    continue
+
+            # Send the message to the agent
+            result = await send_func(user_input, agent)
+
+        return result
+
+    async def _list_prompts(self, list_prompts_func, agent_name) -> None:
+        """
+        List available prompts for an agent.
+
+        Args:
+            list_prompts_func: Function to get available prompts
+            agent_name: Name of the agent
+        """
+        from rich import print as rich_print
+
+        try:
+            # Directly call the list_prompts function for this agent
+            rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
+
+            prompt_servers = await list_prompts_func(agent_name)
+
+            # Process the returned prompt servers
+            if prompt_servers:
+                found_prompts = False
+                for server_name, prompts_info in prompt_servers.items():
+                    if prompts_info and hasattr(prompts_info, "prompts") and prompts_info.prompts:
+                        rich_print(f"\n[bold cyan]{server_name}:[/bold cyan]")
+                        for prompt in prompts_info.prompts:
+                            rich_print(f" {prompt.name}")
+                        found_prompts = True
+                    elif isinstance(prompts_info, list) and prompts_info:
+                        rich_print(f"\n[bold cyan]{server_name}:[/bold cyan]")
+                        for prompt in prompts_info:
+                            if isinstance(prompt, dict) and "name" in prompt:
+                                rich_print(f" {prompt['name']}")
+                            else:
+                                rich_print(f" {prompt}")
+                        found_prompts = True
+
+                if not found_prompts:
+                    rich_print("[yellow]No prompts available[/yellow]")
+            else:
+                rich_print("[yellow]No prompts available[/yellow]")
+        except Exception as e:
+            import traceback
+            rich_print(f"[red]Error listing prompts: {e}[/red]")
+            rich_print(f"[dim]{traceback.format_exc()}[/dim]")
+
+    async def _select_prompt(self, list_prompts_func, apply_prompt_func, agent_name, requested_name=None) -> None:
+        """
+        Select and apply a prompt.
+
+        Args:
+            list_prompts_func: Function to get available prompts
+            apply_prompt_func: Function to apply prompts
+            agent_name: Name of the agent
+            requested_name: Optional name of the prompt to apply
+        """
+        # We already imported these at the top
+        from rich import print as rich_print
+
+        console = Console()
+
+        try:
+            # Get all available prompts directly from the list_prompts function
+            rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
+            prompt_servers = await list_prompts_func(agent_name)
+
+            if not prompt_servers:
+                rich_print("[yellow]No prompts available for this agent[/yellow]")
+                return
+
+            # Process fetched prompts
+            all_prompts = []
+            for server_name, prompts_info in prompt_servers.items():
+                if not prompts_info:
+                    continue
+
+                # Extract prompts
+                prompts = []
+                if hasattr(prompts_info, "prompts"):
+                    prompts = prompts_info.prompts
+                elif isinstance(prompts_info, list):
+                    prompts = prompts_info
+
+                # Process each prompt
+                for prompt in prompts:
+                    # Get basic prompt info
+                    prompt_name = getattr(prompt, "name", "Unknown")
+                    description = getattr(prompt, "description", "No description")
+
+                    # Extract argument information
+                    arg_names = []
+                    required_args = []
+                    optional_args = []
+                    arg_descriptions = {}
+
+                    # Get arguments list
+                    arguments = getattr(prompt, "arguments", None)
+                    if arguments:
+                        for arg in arguments:
+                            name = getattr(arg, "name", None)
+                            if name:
+                                arg_names.append(name)
+
+                                # Store description if available
+                                description = getattr(arg, "description", None)
+                                if description:
+                                    arg_descriptions[name] = description
+
+                                # Check if required
+                                if getattr(arg, "required", False):
+                                    required_args.append(name)
+                                else:
+                                    optional_args.append(name)
+
+                    # Create namespaced version
+                    namespaced_name = f"{server_name}-{prompt_name}"
+
+                    # Add to collection
+                    all_prompts.append(
+                        {
+                            "server": server_name,
+                            "name": prompt_name,
+                            "namespaced_name": namespaced_name,
+                            "description": description,
+                            "arg_count": len(arg_names),
+                            "arg_names": arg_names,
+                            "required_args": required_args,
+                            "optional_args": optional_args,
+                            "arg_descriptions": arg_descriptions,
+                        }
+                    )
+
+            if not all_prompts:
+                rich_print("[yellow]No prompts available for this agent[/yellow]")
+                return
+
+            # Sort prompts by server then name
+            all_prompts.sort(key=lambda p: (p["server"], p["name"]))
+
+            # Handle specifically requested prompt
+            if requested_name:
+                matching_prompts = [
+                    p
+                    for p in all_prompts
+                    if p["name"] == requested_name or p["namespaced_name"] == requested_name
+                ]
+
+                if not matching_prompts:
+                    rich_print(f"[red]Prompt '{requested_name}' not found[/red]")
+                    rich_print("[yellow]Available prompts:[/yellow]")
+                    for p in all_prompts:
+                        rich_print(f" {p['namespaced_name']}")
+                    return
+
+                # If exactly one match, use it
+                if len(matching_prompts) == 1:
+                    selected_prompt = matching_prompts[0]
+                else:
+                    # Handle multiple matches
+                    rich_print(f"[yellow]Multiple prompts match '{requested_name}':[/yellow]")
+                    for i, p in enumerate(matching_prompts):
+                        rich_print(f" {i + 1}. {p['namespaced_name']} - {p['description']}")
+
+                    # Get user selection
+                    selection = (
+                        await get_selection_input("Enter prompt number to select: ", default="1")
+                        or ""
+                    )
+
+                    try:
+                        idx = int(selection) - 1
+                        if 0 <= idx < len(matching_prompts):
+                            selected_prompt = matching_prompts[idx]
+                        else:
+                            rich_print("[red]Invalid selection[/red]")
+                            return
+                    except ValueError:
+                        rich_print("[red]Invalid input, please enter a number[/red]")
+                        return
+            else:
+                # Show prompt selection UI
+                table = Table(title="Available MCP Prompts")
+                table.add_column("#", justify="right", style="cyan")
+                table.add_column("Server", style="green")
+                table.add_column("Prompt Name", style="bright_blue")
+                table.add_column("Description")
+                table.add_column("Args", justify="center")
+
+                # Add prompts to table
+                for i, prompt in enumerate(all_prompts):
+                    required_args = prompt["required_args"]
+                    optional_args = prompt["optional_args"]
+
+                    # Format args column
+                    if required_args and optional_args:
+                        args_display = f"[bold]{len(required_args)}[/bold]+{len(optional_args)}"
+                    elif required_args:
+                        args_display = f"[bold]{len(required_args)}[/bold]"
+                    elif optional_args:
+                        args_display = f"{len(optional_args)} opt"
+                    else:
+                        args_display = "0"
+
+                    table.add_row(
+                        str(i + 1),
+                        prompt["server"],
+                        prompt["name"],
+                        prompt["description"] or "No description",
+                        args_display,
+                    )
+
+                console.print(table)
+                prompt_names = [str(i + 1) for i in range(len(all_prompts))]
+
+                # Get user selection
+                selection = await get_selection_input(
+                    "Enter prompt number to select (or press Enter to cancel): ",
+                    options=prompt_names,
+                    allow_cancel=True,
+                )
+
+                # Handle cancellation
+                if not selection or selection.strip() == "":
+                    rich_print("[yellow]Prompt selection cancelled[/yellow]")
+                    return
+
+                try:
+                    idx = int(selection) - 1
+                    if 0 <= idx < len(all_prompts):
+                        selected_prompt = all_prompts[idx]
+                    else:
+                        rich_print("[red]Invalid selection[/red]")
+                        return
+                except ValueError:
+                    rich_print("[red]Invalid input, please enter a number[/red]")
+                    return
+
+            # Get prompt arguments
+            required_args = selected_prompt["required_args"]
+            optional_args = selected_prompt["optional_args"]
+            arg_descriptions = selected_prompt.get("arg_descriptions", {})
+            arg_values = {}
+
+            # Show argument info if any
+            if required_args or optional_args:
+                if required_args and optional_args:
+                    rich_print(
+                        f"\n[bold]Prompt [cyan]{selected_prompt['name']}[/cyan] requires {len(required_args)} arguments and has {len(optional_args)} optional arguments:[/bold]"
+                    )
+                elif required_args:
+                    rich_print(
+                        f"\n[bold]Prompt [cyan]{selected_prompt['name']}[/cyan] requires {len(required_args)} arguments:[/bold]"
+                    )
+                elif optional_args:
+                    rich_print(
+                        f"\n[bold]Prompt [cyan]{selected_prompt['name']}[/cyan] has {len(optional_args)} optional arguments:[/bold]"
+                    )
+
+                # Collect required arguments
+                for arg_name in required_args:
+                    description = arg_descriptions.get(arg_name, "")
+                    arg_value = await get_argument_input(
+                        arg_name=arg_name,
+                        description=description,
+                        required=True,
+                    )
+                    if arg_value is not None:
+                        arg_values[arg_name] = arg_value
+
+                # Collect optional arguments
+                if optional_args:
+                    for arg_name in optional_args:
+                        description = arg_descriptions.get(arg_name, "")
+                        arg_value = await get_argument_input(
+                            arg_name=arg_name,
+                            description=description,
+                            required=False,
+                        )
+                        if arg_value:
+                            arg_values[arg_name] = arg_value
+
+            # Apply the prompt
+            rich_print(
+                f"\n[bold]Applying prompt [cyan]{selected_prompt['namespaced_name']}[/cyan]...[/bold]"
+            )
+
+            # Call apply_prompt function with the prompt name and arguments
+            await apply_prompt_func(selected_prompt["namespaced_name"], arg_values, agent_name)
+
+        except Exception as e:
+            import traceback
+
+            rich_print(f"[red]Error selecting or applying prompt: {e}[/red]")
+            rich_print(f"[dim]{traceback.format_exc()}[/dim]")
mcp_agent/core/mcp_content.py
CHANGED
@@ -10,12 +10,14 @@ from pathlib import Path
 from typing import Any, List, Literal, Optional, Union
 
 from mcp.types import (
+    Annotations,
     BlobResourceContents,
     EmbeddedResource,
     ImageContent,
     TextContent,
     TextResourceContents,
 )
+from pydantic import AnyUrl
 
 from mcp_agent.mcp.mime_utils import (
     guess_mime_type,
@@ -27,7 +29,7 @@ from mcp_agent.mcp.mime_utils import (
 def MCPText(
     text: str,
     role: Literal["user", "assistant"] = "user",
-    annotations:
+    annotations: Annotations = None,
 ) -> dict:
     """
     Create a message with text content.
@@ -47,11 +49,11 @@ def MCPText(
 
 
 def MCPImage(
-    path:
-    data: bytes = None,
+    path: str | Path | None = None,
+    data: bytes | None = None,
     mime_type: Optional[str] = None,
     role: Literal["user", "assistant"] = "user",
-    annotations:
+    annotations: Annotations | None = None,
 ) -> dict:
     """
     Create a message with image content.
@@ -86,7 +88,9 @@ def MCPImage(
 
     return {
         "role": role,
-        "content": ImageContent(
+        "content": ImageContent(
+            type="image", data=b64_data, mimeType=mime_type, annotations=annotations
+        ),
     }
 
 
@@ -94,7 +98,7 @@ def MCPFile(
     path: Union[str, Path],
     mime_type: Optional[str] = None,
     role: Literal["user", "assistant"] = "user",
-    annotations:
+    annotations: Annotations | None = None,
 ) -> dict:
     """
     Create a message with an embedded resource from a file.
@@ -122,17 +126,19 @@
         binary_data = path.read_bytes()
         b64_data = base64.b64encode(binary_data).decode("ascii")
 
-        resource = BlobResourceContents(uri=uri, blob=b64_data, mimeType=mime_type)
+        resource = BlobResourceContents(uri=AnyUrl(uri), blob=b64_data, mimeType=mime_type)
     else:
         # Read as text
         try:
             text_data = path.read_text(encoding="utf-8")
-            resource = TextResourceContents(uri=uri, text=text_data, mimeType=mime_type)
+            resource = TextResourceContents(uri=AnyUrl(uri), text=text_data, mimeType=mime_type)
         except UnicodeDecodeError:
            # Fallback to binary if text read fails
             binary_data = path.read_bytes()
             b64_data = base64.b64encode(binary_data).decode("ascii")
-            resource = BlobResourceContents(
+            resource = BlobResourceContents(
+                uri=AnyUrl(uri), blob=b64_data, mimeType=mime_type or "application/octet-stream"
+            )
 
     return {
         "role": role,
@@ -140,7 +146,9 @@
     }
 
 
-def MCPPrompt(
+def MCPPrompt(
+    *content_items: Union[dict, str, Path, bytes], role: Literal["user", "assistant"] = "user"
+) -> List[dict]:
     """
     Create one or more prompt messages with various content types.
 
@@ -164,7 +172,7 @@ def MCPPrompt(*content_items, role: Literal["user", "assistant"] = "user") -> Li
         if isinstance(item, dict) and "role" in item and "content" in item:
             # Already a fully formed message
             result.append(item)
-        elif isinstance(item, str)
+        elif isinstance(item, str):
             # Simple text content (that's not a file path)
             result.append(MCPText(item, role=role))
         elif isinstance(item, Path) or isinstance(item, str):
mcp_agent/core/prompt.py
CHANGED
@@ -54,10 +54,14 @@ class Prompt:
         A PromptMessageMultipart with assistant role and the specified content
         """
         messages = Assistant(*content_items)
-        return PromptMessageMultipart(
+        return PromptMessageMultipart(
+            role="assistant", content=[msg["content"] for msg in messages]
+        )
 
     @classmethod
-    def message(
+    def message(
+        cls, *content_items, role: Literal["user", "assistant"] = "user"
+    ) -> PromptMessageMultipart:
         """
         Create a PromptMessageMultipart with the specified role and content items.
 
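For reference, a small sketch of the reformatted factory methods at the call site; the message text is illustrative and assumes version 0.2.0 is installed:

from mcp_agent.core.prompt import Prompt

# An assistant-role multipart message built from plain text content.
reply = Prompt.assistant("The workflow agents now live under mcp_agent.agents.workflow.")

# The role-explicit variant shown in the hunk above; role defaults to "user".
question = Prompt.message("What changed in 0.2.0?", role="user")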
mcp_agent/core/validation.py
CHANGED
@@ -10,7 +10,7 @@ from mcp_agent.core.exceptions import (
     CircularDependencyError,
     ServerConfigError,
 )
-from mcp_agent.
+from mcp_agent.llm.augmented_llm import AugmentedLLM
 
 
 def validate_server_references(context, agents: Dict[str, Dict[str, Any]]) -> None:
@@ -56,7 +56,7 @@ def validate_workflow_references(agents: Dict[str, Dict[str, Any]]) -> None:
         if agent_type == AgentType.PARALLEL.value:
             # Check fan_in exists
             fan_in = agent_data["fan_in"]
-            if fan_in not in available_components:
+            if fan_in and fan_in not in available_components:
                 raise AgentConfigError(
                     f"Parallel workflow '{name}' references non-existent fan_in component: {fan_in}"
                 )
@@ -106,7 +106,7 @@ def validate_workflow_references(agents: Dict[str, Dict[str, Any]]) -> None:
 
         elif agent_type == AgentType.ROUTER.value:
             # Check all referenced agents exist
-            router_agents = agent_data["
+            router_agents = agent_data["router_agents"]
             missing = [a for a in router_agents if a not in available_components]
             if missing:
                 raise AgentConfigError(
@@ -187,7 +187,7 @@ def get_dependencies(
             deps.extend(get_dependencies(fan_out, agents, visited, path, agent_type))
     elif config["type"] == AgentType.CHAIN.value:
         # Get dependencies from sequence agents
-        sequence = config.get("sequence", config.get("
+        sequence = config.get("sequence", config.get("router_agents", []))
        for agent_name in sequence:
             deps.extend(get_dependencies(agent_name, agents, visited, path, agent_type))
 
@@ -199,23 +199,96 @@
     return deps
 
 
-def
-
-) -> List[str]:
+def get_dependencies_groups(
+    agents_dict: Dict[str, Dict[str, Any]], allow_cycles: bool = False
+) -> List[List[str]]:
     """
-    Get dependencies
-
+    Get dependencies between agents and group them into dependency layers.
+    Each layer can be initialized in parallel.
 
     Args:
-
-
-        visited: Set of already visited agents
-        path: Current path for cycle detection
+        agents_dict: Dictionary of agent configurations
+        allow_cycles: Whether to allow cyclic dependencies
 
     Returns:
-        List of
+        List of lists, where each inner list is a group of agents that can be initialized together
 
     Raises:
-        CircularDependencyError: If circular dependency detected
+        CircularDependencyError: If circular dependency detected and allow_cycles is False
     """
-
+    # Get all agent names
+    agent_names = list(agents_dict.keys())
+
+    # Dictionary to store dependencies for each agent
+    dependencies = {name: set() for name in agent_names}
+
+    # Build the dependency graph
+    for name, agent_data in agents_dict.items():
+        agent_type = agent_data["type"]
+
+        if agent_type == AgentType.PARALLEL.value:
+            # Parallel agents depend on their fan-out and fan-in agents
+            dependencies[name].update(agent_data.get("parallel_agents", []))
+        elif agent_type == AgentType.CHAIN.value:
+            # Chain agents depend on the agents in their sequence
+            dependencies[name].update(agent_data.get("chain_agents", []))
+        elif agent_type == AgentType.ROUTER.value:
+            # Router agents depend on the agents they route to
+            dependencies[name].update(agent_data.get("router_agents", []))
+        elif agent_type == AgentType.ORCHESTRATOR.value:
+            # Orchestrator agents depend on their child agents
+            dependencies[name].update(agent_data.get("child_agents", []))
+        elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
+            # Evaluator-Optimizer agents depend on their evaluation and optimization agents
+            dependencies[name].update(agent_data.get("eval_optimizer_agents", []))
+
+    # Check for cycles if not allowed
+    if not allow_cycles:
+        visited = set()
+        path = set()
+
+        def visit(node) -> None:
+            if node in path:
+                path_str = " -> ".join(path) + " -> " + node
+                raise CircularDependencyError(f"Circular dependency detected: {path_str}")
+            if node in visited:
+                return
+
+            path.add(node)
+            for dep in dependencies[node]:
+                if dep in agent_names: # Skip dependencies to non-existent agents
+                    visit(dep)
+            path.remove(node)
+            visited.add(node)
+
+        # Check each node
+        for name in agent_names:
+            if name not in visited:
+                visit(name)
+
+    # Group agents by dependency level
+    result = []
+    remaining = set(agent_names)
+
+    while remaining:
+        # Find all agents that have no remaining dependencies
+        current_level = set()
+        for name in remaining:
+            if not dependencies[name] & remaining: # If no dependencies in remaining agents
+                current_level.add(name)
+
+        if not current_level:
+            if allow_cycles:
+                # If cycles are allowed, just add one remaining node to break the cycle
+                current_level.add(next(iter(remaining)))
+            else:
+                # This should not happen if we checked for cycles
+                raise CircularDependencyError("Unresolvable dependency cycle detected")
+
+        # Add the current level to the result
+        result.append(list(current_level))
+
+        # Remove current level from remaining
+        remaining -= current_level
+
+    return result