fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +61 -415
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +15 -19
- mcp_agent/cli/commands/bootstrap.py +19 -38
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +7 -14
- mcp_agent/cli/main.py +7 -10
- mcp_agent/cli/terminal.py +3 -3
- mcp_agent/config.py +25 -40
- mcp_agent/context.py +12 -21
- mcp_agent/context_dependent.py +3 -5
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +23 -55
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/fastagent.py +145 -371
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +17 -17
- mcp_agent/core/prompt.py +6 -9
- mcp_agent/core/request_params.py +6 -3
- mcp_agent/core/validation.py +92 -18
- mcp_agent/executor/decorator_registry.py +9 -17
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +19 -41
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +15 -21
- mcp_agent/human_input/handler.py +4 -7
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/llm/augmented_llm.py +450 -0
- mcp_agent/llm/augmented_llm_passthrough.py +162 -0
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/llm/memory.py +103 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
- mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
- mcp_agent/llm/sampling_format_converter.py +37 -0
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +17 -19
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +22 -22
- mcp_agent/mcp/gen_client.py +1 -3
- mcp_agent/mcp/interfaces.py +117 -110
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +7 -7
- mcp_agent/mcp/mcp_agent_server.py +8 -8
- mcp_agent/mcp/mcp_aggregator.py +102 -143
- mcp_agent/mcp/mcp_connection_manager.py +20 -27
- mcp_agent/mcp/prompt_message_multipart.py +68 -16
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +30 -48
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +109 -0
- mcp_agent/mcp/prompts/prompt_server.py +155 -195
- mcp_agent/mcp/prompts/prompt_template.py +35 -66
- mcp_agent/mcp/resource_utils.py +7 -14
- mcp_agent/mcp/sampling.py +17 -17
- mcp_agent/mcp_server/agent_server.py +13 -17
- mcp_agent/mcp_server_registry.py +13 -22
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
- mcp_agent/resources/examples/in_dev/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +6 -3
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +4 -8
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +16 -20
- fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
- mcp_agent/core/agent_app.py +0 -646
- mcp_agent/core/agent_utils.py +0 -71
- mcp_agent/core/decorators.py +0 -455
- mcp_agent/core/factory.py +0 -463
- mcp_agent/core/proxies.py +0 -269
- mcp_agent/core/types.py +0 -24
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -111
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
- mcp_agent/resources/examples/researcher/researcher.py +0 -38
- mcp_agent/resources/examples/workflows/chaining.py +0 -44
- mcp_agent/resources/examples/workflows/evaluator.py +0 -78
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -25
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
- mcp_agent/resources/examples/workflows/parallel.py +0 -78
- mcp_agent/resources/examples/workflows/router.py +0 -53
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -18
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -61
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -46
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +0 -753
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -350
- mcp_agent/workflows/parallel/fan_out.py +0 -187
- mcp_agent/workflows/parallel/parallel_llm.py +0 -166
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -368
- mcp_agent/workflows/router/router_embedding.py +0 -240
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -320
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -320
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
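The rename entries above imply that 0.1.12 import paths under mcp_agent.workflows no longer resolve in 0.2.0. A minimal sketch of the corresponding import update, assuming the public class names (ModelFactory, AugmentedLLM) are unchanged across the move:

# 0.1.12 (modules deleted in this release):
#   from mcp_agent.workflows.llm.model_factory import ModelFactory
#   from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM

# 0.2.0 (module paths per the rename/addition entries above; symbol names are assumptions):
from mcp_agent.llm.model_factory import ModelFactory
from mcp_agent.llm.augmented_llm import AugmentedLLM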
mcp_agent/core/proxies.py
DELETED
@@ -1,269 +0,0 @@
"""
Proxy classes for agent interactions.
These proxies provide a consistent interface for interacting with different types of agents.

FOR COMPATIBILITY WITH LEGACY MCP-AGENT CODE

"""

from typing import List, Optional, Dict, Union, TYPE_CHECKING

from mcp_agent.agents.agent import Agent
from mcp_agent.app import MCPApp
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
from mcp.types import EmbeddedResource

# Handle circular imports
if TYPE_CHECKING:
    from mcp_agent.core.types import WorkflowType, ProxyDict
else:
    # Define minimal versions for runtime
    from typing import Any

    # Use Any for runtime to avoid circular imports
    WorkflowType = Any
    ProxyDict = Dict[str, "BaseAgentProxy"]


class BaseAgentProxy:
    """Base class for all proxy types"""

    def __init__(self, app: MCPApp, name: str):
        self._app = app
        self._name = name

    async def __call__(self, message: Optional[str] = None) -> str:
        """Allow: agent.researcher('message') or just agent.researcher()"""
        if message is None:
            # When called with no arguments, use prompt() to open the interactive interface
            return await self.prompt()
        return await self.send(message)

    async def send(
        self, message: Optional[Union[str, PromptMessageMultipart]] = None
    ) -> str:
        """
        Allow: agent.researcher.send('message') or agent.researcher.send(Prompt.user('message'))

        Args:
            message: Either a string message or a PromptMessageMultipart object

        Returns:
            The agent's response as a string
        """
        if message is None:
            # For consistency with agent(), use prompt() to open the interactive interface
            return await self.prompt()

        # If a PromptMessageMultipart is passed, use send_prompt
        if isinstance(message, PromptMessageMultipart):
            return await self.send_prompt(message)

        # For string messages, use generate_str (traditional behavior)
        return await self.generate_str(message)

    async def prompt(self, default_prompt: str = "") -> str:
        """Allow: agent.researcher.prompt()"""
        from mcp_agent.core.agent_app import AgentApp

        # First check if _app is directly an AgentApp
        if isinstance(self._app, AgentApp):
            return await self._app.prompt(self._name, default_prompt)

        # If not, check if it's an MCPApp with an _agent_app attribute
        if hasattr(self._app, "_agent_app"):
            agent_app = self._app._agent_app
            if agent_app:
                return await agent_app.prompt(self._name, default_prompt)

        # If we can't find an AgentApp, return an error message
        return "ERROR: Cannot prompt() - AgentApp not found"

    async def generate_str(self, message: str) -> str:
        """Generate response for a message - must be implemented by subclasses"""
        raise NotImplementedError("Subclasses must implement generate_str")

    async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
        """Send a message to the agent and return the response"""
        raise NotImplementedError("Subclasses must implement send(prompt)")

    async def apply_prompt(
        self, prompt_name: str = None, arguments: dict[str, str] = None
    ) -> str:
        """
        Apply a Prompt from an MCP Server - implemented by subclasses.
        This is the preferred method for applying prompts.
        Always returns an Assistant message.

        Args:
            prompt_name: Name of the prompt to apply
            arguments: Optional dictionary of string arguments for prompt templating
        """
        raise NotImplementedError("Subclasses must implement apply_prompt")


class LLMAgentProxy(BaseAgentProxy):
    """Proxy for regular agents that use _llm.generate_str()"""

    def __init__(self, app: MCPApp, name: str, agent: Agent):
        super().__init__(app, name)
        self._agent = agent

    async def generate_str(self, message: str, **kwargs) -> str:
        """Forward message and all kwargs to the agent's LLM"""
        return await self._agent._llm.generate_str(message, **kwargs)

    async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
        """Send a message to the agent and return the response"""
        return await self._agent._llm.generate_prompt(prompt, None)

    async def apply_prompt(
        self, prompt_name: str = None, arguments: dict[str, str] = None
    ) -> str:
        """
        Apply a prompt from an MCP server.
        This is the preferred method for applying prompts.

        Args:
            prompt_name: Name of the prompt to apply
            arguments: Optional dictionary of string arguments for prompt templating

        Returns:
            The assistant's response
        """
        return await self._agent.apply_prompt(prompt_name, arguments)

    # Add the new methods
    async def get_embedded_resources(
        self, server_name: str, resource_name: str
    ) -> List[EmbeddedResource]:
        """
        Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.

        Args:
            server_name: Name of the MCP server to retrieve the resource from
            resource_name: Name or URI of the resource to retrieve

        Returns:
            List of EmbeddedResource objects ready to use in a PromptMessageMultipart
        """
        return await self._agent.get_embedded_resources(server_name, resource_name)

    async def with_resource(
        self,
        prompt_content: Union[str, PromptMessageMultipart],
        server_name: str,
        resource_name: str,
    ) -> str:
        """
        Create a prompt with the given content and resource, then send it to the agent.

        Args:
            prompt_content: Either a string message or an existing PromptMessageMultipart
            server_name: Name of the MCP server to retrieve the resource from
            resource_name: Name or URI of the resource to retrieve

        Returns:
            The agent's response as a string
        """
        return await self._agent.with_resource(
            prompt_content, server_name, resource_name
        )


class WorkflowProxy(BaseAgentProxy):
    """Proxy for workflow types that implement generate_str() directly"""

    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
        super().__init__(app, name)
        self._workflow = workflow

    async def generate_str(self, message: str, **kwargs) -> str:
        """Forward message and all kwargs to the underlying workflow"""
        return await self._workflow.generate_str(message, **kwargs)


class RouterProxy(BaseAgentProxy):
    """Proxy for LLM Routers"""

    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
        super().__init__(app, name)
        self._workflow = workflow

    async def generate_str(self, message: str, **kwargs) -> str:
        """
        Route the message and forward kwargs to the resulting agent if applicable.
        Note: For now, route() itself doesn't accept kwargs.
        """
        results = await self._workflow.route(message)
        if not results:
            return "No appropriate route found for the request."

        # Get the top result
        top_result = results[0]
        if isinstance(top_result.result, Agent):
            # Agent route - delegate to the agent, passing along kwargs
            agent = top_result.result
            return await agent._llm.generate_str(message, **kwargs)
        elif isinstance(top_result.result, str):
            # Server route - use the router directly
            return "Tool call requested by router - not yet supported"

        return f"Routed to: {top_result.result} ({top_result.confidence}): {top_result.reasoning}"


class ChainProxy(BaseAgentProxy):
    """Proxy for chained agent operations"""

    def __init__(
        self, app: MCPApp, name: str, sequence: List[str], agent_proxies: ProxyDict
    ):
        super().__init__(app, name)
        self._sequence = sequence
        self._agent_proxies = agent_proxies
        self._continue_with_final = True  # Default behavior
        self._cumulative = False  # Default to sequential chaining

    async def generate_str(self, message: str, **kwargs) -> str:
        """Chain message through a sequence of agents.

        For the first agent in the chain, pass all kwargs to maintain transparency.

        Two modes of operation:
        1. Sequential (default): Each agent receives only the output of the previous agent
        2. Cumulative: Each agent receives all previous agent responses concatenated
        """
        if not self._sequence:
            return message

        # Process the first agent (same for both modes)
        first_agent = self._sequence[0]
        first_proxy = self._agent_proxies[first_agent]
        first_response = await first_proxy.generate_str(message, **kwargs)

        if len(self._sequence) == 1:
            return first_response

        if self._cumulative:
            # Cumulative mode: each agent gets all previous responses
            cumulative_response = f'<fastagent:response agent="{first_agent}">\n{first_response}\n</fastagent:response>'

            # Process subsequent agents with cumulative results
            for agent_name in self._sequence[1:]:
                proxy = self._agent_proxies[agent_name]
                # Pass all previous responses to next agent
                agent_response = await proxy.generate_str(cumulative_response)
                # Add this agent's response to the cumulative result
                cumulative_response += f'\n\n<fastagent:response agent="{agent_name}">\n{agent_response}\n</fastagent:response>'

            return cumulative_response
        else:
            # Sequential chaining (original behavior)
            current_message = first_response

            # For subsequent agents, just pass the message from previous agent
            for agent_name in self._sequence[1:]:
                proxy = self._agent_proxies[agent_name]
                current_message = await proxy.generate_str(current_message)

            return current_message
mcp_agent/core/types.py
DELETED
@@ -1,24 +0,0 @@
"""
Type definitions for fast-agent core module.
"""

from typing import Dict, Union, TypeAlias, TYPE_CHECKING

from mcp_agent.agents.agent import Agent
from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
    EvaluatorOptimizerLLM,
)
from mcp_agent.workflows.router.router_llm import LLMRouter

# Avoid circular imports
if TYPE_CHECKING:
    from mcp_agent.core.proxies import BaseAgentProxy

# Type aliases for better readability
WorkflowType: TypeAlias = Union[
    Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter
]
AgentOrWorkflow: TypeAlias = Union[Agent, WorkflowType]
ProxyDict: TypeAlias = Dict[str, "BaseAgentProxy"]  # Forward reference as string
mcp_agent/eval/__init__.py
DELETED
File without changes
mcp_agent/mcp/stdio.py
DELETED
@@ -1,111 +0,0 @@
"""
Custom implementation of stdio_client that handles stderr through rich console.
"""

from contextlib import asynccontextmanager
import subprocess
import anyio
from anyio.streams.text import TextReceiveStream
from mcp.client.stdio import StdioServerParameters, get_default_environment
import mcp.types as types
from mcp_agent.logging.logger import get_logger
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream

logger = get_logger(__name__)


# TODO this will be removed when client library with https://github.com/modelcontextprotocol/python-sdk/pull/343 is released
@asynccontextmanager
async def stdio_client_with_rich_stderr(server: StdioServerParameters):
    """
    Modified version of stdio_client that captures stderr and routes it through our rich console.
    Follows the original pattern closely for reliability.

    Args:
        server: The server parameters for the stdio connection
    """
    read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
    read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]

    write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
    write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]

    read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
    write_stream, write_stream_reader = anyio.create_memory_object_stream(0)

    # Open process with stderr piped for capture
    process = await anyio.open_process(
        [server.command, *server.args],
        env=server.env if server.env is not None else get_default_environment(),
        stderr=subprocess.PIPE,
    )

    if process.pid:
        logger.debug(f"Started process '{server.command}' with PID: {process.pid}")

    if process.returncode is not None:
        logger.debug(f"return code (early){process.returncode}")
        raise RuntimeError(
            f"Process terminated immediately with code {process.returncode}"
        )

    async def stdout_reader():
        assert process.stdout, "Opened process is missing stdout"
        try:
            async with read_stream_writer:
                buffer = ""
                async for chunk in TextReceiveStream(
                    process.stdout,
                    encoding=server.encoding,
                    errors=server.encoding_error_handler,
                ):
                    lines = (buffer + chunk).split("\n")
                    buffer = lines.pop()

                    for line in lines:
                        if not line:
                            continue
                        try:
                            message = types.JSONRPCMessage.model_validate_json(line)
                        except Exception as exc:
                            await read_stream_writer.send(exc)
                            continue

                        await read_stream_writer.send(message)
        except anyio.ClosedResourceError:
            await anyio.lowlevel.checkpoint()

    # async def stderr_reader():
    #     assert process.stderr, "Opened process is missing stderr"
    #     try:
    #         async for chunk in TextReceiveStream(
    #             process.stderr,
    #             encoding=server.encoding,
    #             errors=server.encoding_error_handler,
    #         ):
    #             if chunk.strip():
    #                 # Let the logging system handle the formatting consistently
    #                 logger.event("info", "mcpserver.stderr", chunk.rstrip(), None, {})
    #     except anyio.ClosedResourceError:
    #         await anyio.lowlevel.checkpoint()

    async def stdin_writer():
        assert process.stdin, "Opened process is missing stdin"
        try:
            async with write_stream_reader:
                async for message in write_stream_reader:
                    json = message.model_dump_json(by_alias=True, exclude_none=True)
                    await process.stdin.send(
                        (json + "\n").encode(
                            encoding=server.encoding,
                            errors=server.encoding_error_handler,
                        )
                    )
        except anyio.ClosedResourceError:
            await anyio.lowlevel.checkpoint()

    # Use context managers to handle cleanup automatically
    async with anyio.create_task_group() as tg, process:
        tg.start_soon(stdout_reader)
        tg.start_soon(stdin_writer)
        yield read_stream, write_stream
mcp_agent/resources/examples/data-analysis/analysis-campaign.py
DELETED
@@ -1,188 +0,0 @@
import asyncio

from mcp_agent.core.fastagent import FastAgent
from mcp_agent.workflows.llm.augmented_llm import RequestParams

# Create the application
fast = FastAgent("Data Analysis & Campaign Generator")


# Original data analysis components
@fast.agent(
    name="data_analysis",
    instruction="""
You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
You can add further packages if needed.
Data files are accessible from the /mnt/data/ directory (this is the current working directory).
Visualisations should be saved as .png files in the current working directory.
Extract key insights that would be compelling for a social media campaign.
""",
    servers=["interpreter"],
    request_params=RequestParams(maxTokens=8192),
    model="sonnet",
)
@fast.agent(
    "evaluator",
    """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
    You must make sure that the tool has:
     - Considered the best way for a Human to interpret the data
     - Produced insightful visualisations.
     - Provided a high level summary report for the Human.
     - Has had its findings challenged, and justified
     - Extracted compelling insights suitable for social media promotion
    """,
    request_params=RequestParams(maxTokens=8192),
    model="gpt-4o",
)
@fast.evaluator_optimizer(
    "analysis_tool",
    generator="data_analysis",
    evaluator="evaluator",
    max_refinements=3,
    min_rating="EXCELLENT",
)
# Research component using Brave search
@fast.agent(
    "context_researcher",
    """You are a research specialist who provides cultural context for different regions.
    For any given data insight and target language/region, research:
    1. Cultural sensitivities related to presenting this type of data
    2. Local social media trends and preferences
    3. Region-specific considerations for marketing campaigns

    Always provide actionable recommendations for adapting content to each culture.
    """,
    servers=["fetch", "brave"],  # Using the fetch MCP server for Brave search
    request_params=RequestParams(temperature=0.3),
    model="gpt-4o",
)
# Social media content generator
@fast.agent(
    "campaign_generator",
    """Generate engaging social media content based on data insights.
    Create compelling, shareable content that:
    - Highlights key research findings in an accessible way
    - Uses appropriate tone for the platform (Twitter/X, LinkedIn, Instagram, etc.)
    - Is concise and impactful
    - Includes suggested hashtags and posting schedule

    Format your response with clear sections for each platform.
    Save different campaign elements as separate files in the current directory.
    """,
    servers=["filesystem"],  # Using filesystem MCP server to save files
    request_params=RequestParams(temperature=0.7),
    model="sonnet",
    use_history=False,
)
# Translation agents with cultural adaptation
@fast.agent(
    "translate_fr",
    """Translate social media content to French with cultural adaptation.
    Consider French cultural norms, expressions, and social media preferences.
    Ensure the translation maintains the impact of the original while being culturally appropriate.
    Save the translated content to a file with appropriate naming.
    """,
    model="haiku",
    use_history=False,
    servers=["filesystem"],
)
@fast.agent(
    "translate_es",
    """Translate social media content to Spanish with cultural adaptation.
    Consider Spanish-speaking cultural contexts, expressions, and social media preferences.
    Ensure the translation maintains the impact of the original while being culturally appropriate.
    Save the translated content to a file with appropriate naming.
    """,
    model="haiku",
    use_history=False,
    servers=["filesystem"],
)
@fast.agent(
    "translate_de",
    """Translate social media content to German with cultural adaptation.
    Consider German cultural norms, expressions, and social media preferences.
    Ensure the translation maintains the impact of the original while being culturally appropriate.
    Save the translated content to a file with appropriate naming.
    """,
    model="haiku",
    use_history=False,
    servers=["filesystem"],
)
@fast.agent(
    "translate_ja",
    """Translate social media content to Japanese with cultural adaptation.
    Consider Japanese cultural norms, expressions, and social media preferences.
    Ensure the translation maintains the impact of the original while being culturally appropriate.
    Save the translated content to a file with appropriate naming.
    """,
    model="haiku",
    use_history=False,
    servers=["filesystem"],
)
# Parallel workflow for translations
@fast.parallel(
    "translate_campaign",
    instruction="Translates content to French, Spanish, German and Japanese. Supply the content to translate, translations will be saved to the filesystem.",
    fan_out=["translate_fr", "translate_es", "translate_de", "translate_ja"],
    include_request=True,
)
# Cultural sensitivity review agent
@fast.agent(
    "cultural_reviewer",
    """Review all translated content for cultural sensitivity and appropriateness.
    For each language version, evaluate:
    - Cultural appropriateness
    - Potential misunderstandings or sensitivities
    - Effectiveness for the target culture

    Provide specific recommendations for any needed adjustments and save a review report.
    """,
    servers=["filesystem"],
    request_params=RequestParams(temperature=0.2),
)
# Campaign optimization workflow
@fast.evaluator_optimizer(
    "campaign_optimizer",
    generator="campaign_generator",
    evaluator="cultural_reviewer",
    max_refinements=2,
    min_rating="EXCELLENT",
)
# Main workflow orchestration
@fast.orchestrator(
    "research_campaign_creator",
    instruction="""
    Create a complete multi-lingual social media campaign based on data analysis results.
    The workflow will:
    1. Analyze the provided data and extract key insights
    2. Research cultural contexts for target languages
    3. Generate appropriate social media content
    4. Translate and culturally adapt the content
    5. Review and optimize all materials
    6. Save all campaign elements to files
    """,
    agents=[
        "analysis_tool",
        "context_researcher",
        "campaign_optimizer",
        "translate_campaign",
    ],
    model="sonnet",  # Using a more capable model for orchestration
    request_params=RequestParams(maxTokens=8192),
    plan_type="full",
)
async def main():
    # Use the app's context manager
    print(
        "WARNING: This workflow will likely run for >10 minutes and consume a lot of tokens. Press Enter to accept the default prompt and proceed"
    )

    async with fast.run() as agent:
        await agent.research_campaign_creator.prompt(
            default_prompt="Analyze the CSV file in the current directory and create a comprehensive multi-lingual social media campaign based on the findings. Save all campaign elements as separate files."
        )


if __name__ == "__main__":
    asyncio.run(main())
mcp_agent/resources/examples/data-analysis/analysis.py
DELETED
@@ -1,65 +0,0 @@
import asyncio

from mcp_agent.core.fastagent import FastAgent
from mcp_agent.workflows.llm.augmented_llm import RequestParams

# Create the application
fast = FastAgent("Data Analysis (Roots)")


# The sample data is under Database Contents License (DbCL) v1.0.
# Available here : https://www.kaggle.com/datasets/pavansubhasht/ibm-hr-analytics-attrition-dataset


@fast.agent(
    name="data_analysis",
    instruction="""
You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
You can add further packages if needed.
Data files are accessible from the /mnt/data/ directory (this is the current working directory).
Visualisations should be saved as .png files in the current working directory.
""",
    servers=["interpreter"],
    request_params=RequestParams(maxTokens=8192),
)
async def main():
    # Use the app's context manager
    async with fast.run() as agent:
        await agent(
            "There is a csv file in the current directory. "
            "Analyse the file, produce a detailed description of the data, and any patterns it contains.",
        )
        await agent(
            "Consider the data, and how to usefully group it for presentation to a Human. Find insights, using the Python Interpreter as needed.\n"
            "Use MatPlotLib to produce insightful visualisations. Save them as '.png' files in the current directory. Be sure to run the code and save the files.\n"
            "Produce a summary with major insights to the data",
        )
        await agent()


if __name__ == "__main__":
    asyncio.run(main())


############################################################################################################
# Example of evaluator/optimizer flow
############################################################################################################
# @fast.agent(
#     "evaluator",
#     """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
#     You must make sure that the tool has:
#      - Considered the best way for a Human to interpret the data
#      - Produced insightful visualasions.
#      - Provided a high level summary report for the Human.
#      - Has had its findings challenged, and justified
#     """,
#     request_params=RequestParams(maxTokens=8192),
# )
# @fast.evaluator_optimizer(
#     "analysis_tool",
#     generator="data_analysis",
#     evaluator="evaluator",
#     max_refinements=3,
#     min_rating="EXCELLENT",
# )