fast-agent-mcp 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +59 -371
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +3 -1
  14. mcp_agent/cli/commands/bootstrap.py +18 -7
  15. mcp_agent/cli/commands/setup.py +12 -4
  16. mcp_agent/cli/main.py +1 -1
  17. mcp_agent/cli/terminal.py +1 -1
  18. mcp_agent/config.py +24 -35
  19. mcp_agent/context.py +3 -1
  20. mcp_agent/context_dependent.py +3 -1
  21. mcp_agent/core/agent_types.py +10 -7
  22. mcp_agent/core/direct_agent_app.py +179 -0
  23. mcp_agent/core/direct_decorators.py +443 -0
  24. mcp_agent/core/direct_factory.py +476 -0
  25. mcp_agent/core/enhanced_prompt.py +15 -20
  26. mcp_agent/core/fastagent.py +151 -337
  27. mcp_agent/core/interactive_prompt.py +424 -0
  28. mcp_agent/core/mcp_content.py +19 -11
  29. mcp_agent/core/prompt.py +6 -2
  30. mcp_agent/core/validation.py +89 -16
  31. mcp_agent/executor/decorator_registry.py +6 -2
  32. mcp_agent/executor/temporal.py +35 -11
  33. mcp_agent/executor/workflow_signal.py +8 -2
  34. mcp_agent/human_input/handler.py +3 -1
  35. mcp_agent/llm/__init__.py +2 -0
  36. mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
  37. mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
  38. mcp_agent/llm/augmented_llm_playback.py +83 -0
  39. mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
  40. mcp_agent/llm/providers/__init__.py +8 -0
  41. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
  42. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
  43. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  44. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
  45. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
  46. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
  47. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
  48. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
  49. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
  50. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
  51. mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
  52. mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
  53. mcp_agent/logging/logger.py +2 -2
  54. mcp_agent/mcp/gen_client.py +9 -3
  55. mcp_agent/mcp/interfaces.py +67 -45
  56. mcp_agent/mcp/logger_textio.py +97 -0
  57. mcp_agent/mcp/mcp_agent_client_session.py +12 -4
  58. mcp_agent/mcp/mcp_agent_server.py +3 -1
  59. mcp_agent/mcp/mcp_aggregator.py +124 -93
  60. mcp_agent/mcp/mcp_connection_manager.py +21 -7
  61. mcp_agent/mcp/prompt_message_multipart.py +59 -1
  62. mcp_agent/mcp/prompt_render.py +77 -0
  63. mcp_agent/mcp/prompt_serialization.py +20 -13
  64. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  65. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  66. mcp_agent/mcp/prompts/prompt_load.py +15 -5
  67. mcp_agent/mcp/prompts/prompt_server.py +154 -87
  68. mcp_agent/mcp/prompts/prompt_template.py +26 -35
  69. mcp_agent/mcp/resource_utils.py +3 -1
  70. mcp_agent/mcp/sampling.py +24 -15
  71. mcp_agent/mcp_server/agent_server.py +8 -5
  72. mcp_agent/mcp_server_registry.py +22 -9
  73. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
  74. mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
  75. mcp_agent/resources/examples/internal/agent.py +4 -2
  76. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  77. mcp_agent/resources/examples/prompting/image_server.py +3 -1
  78. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  79. mcp_agent/ui/console_display.py +27 -7
  80. fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
  81. mcp_agent/core/agent_app.py +0 -570
  82. mcp_agent/core/agent_utils.py +0 -69
  83. mcp_agent/core/decorators.py +0 -448
  84. mcp_agent/core/factory.py +0 -422
  85. mcp_agent/core/proxies.py +0 -278
  86. mcp_agent/core/types.py +0 -22
  87. mcp_agent/eval/__init__.py +0 -0
  88. mcp_agent/mcp/stdio.py +0 -114
  89. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  90. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  91. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  92. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  93. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  94. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  95. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  96. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
  97. mcp_agent/resources/examples/researcher/researcher.py +0 -39
  98. mcp_agent/resources/examples/workflows/chaining.py +0 -45
  99. mcp_agent/resources/examples/workflows/evaluator.py +0 -79
  100. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  101. mcp_agent/resources/examples/workflows/human_input.py +0 -26
  102. mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
  103. mcp_agent/resources/examples/workflows/parallel.py +0 -79
  104. mcp_agent/resources/examples/workflows/router.py +0 -54
  105. mcp_agent/resources/examples/workflows/sse.py +0 -23
  106. mcp_agent/telemetry/__init__.py +0 -0
  107. mcp_agent/telemetry/usage_tracking.py +0 -19
  108. mcp_agent/workflows/__init__.py +0 -0
  109. mcp_agent/workflows/embedding/__init__.py +0 -0
  110. mcp_agent/workflows/embedding/embedding_base.py +0 -58
  111. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  112. mcp_agent/workflows/embedding/embedding_openai.py +0 -37
  113. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  114. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
  115. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  116. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
  117. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
  118. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
  119. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
  120. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
  121. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  122. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
  123. mcp_agent/workflows/llm/__init__.py +0 -0
  124. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
  125. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  126. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  127. mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
  128. mcp_agent/workflows/parallel/__init__.py +0 -0
  129. mcp_agent/workflows/parallel/fan_in.py +0 -320
  130. mcp_agent/workflows/parallel/fan_out.py +0 -181
  131. mcp_agent/workflows/parallel/parallel_llm.py +0 -149
  132. mcp_agent/workflows/router/__init__.py +0 -0
  133. mcp_agent/workflows/router/router_base.py +0 -338
  134. mcp_agent/workflows/router/router_embedding.py +0 -226
  135. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  136. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  137. mcp_agent/workflows/router/router_llm.py +0 -304
  138. mcp_agent/workflows/swarm/__init__.py +0 -0
  139. mcp_agent/workflows/swarm/swarm.py +0 -292
  140. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  141. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  142. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  143. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  144. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  145. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
  146. /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
  147. /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
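
The headline change in 0.2.0 is a package restructure: the mcp_agent.workflows tree is removed, the LLM plumbing moves to mcp_agent.llm (providers under mcp_agent.llm.providers), the workflow implementations move to mcp_agent.agents.workflow, and the proxy/decorator layer is replaced by the new direct_* modules. A minimal import sketch of what the renames above imply; the class names are assumptions inferred from the module names, not confirmed by this diff:

    # 0.1.13 layout (removed in 0.2.0):
    #   from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
    #   from mcp_agent.workflows.llm.model_factory import ModelFactory

    # 0.2.0 layout, following the {workflows/llm -> llm} renames above:
    from mcp_agent.llm.augmented_llm import AugmentedLLM
    from mcp_agent.llm.model_factory import ModelFactory
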
mcp_agent/core/proxies.py DELETED
@@ -1,278 +0,0 @@
- """
- Proxy classes for agent interactions.
- These proxies provide a consistent interface for interacting with different types of agents.
-
- FOR COMPATIBILITY WITH LEGACY MCP-AGENT CODE
-
- """
-
- from typing import TYPE_CHECKING, Dict, List, Optional, Union
-
- from mcp.types import EmbeddedResource
-
- from mcp_agent.agents.agent import Agent
- from mcp_agent.app import MCPApp
- from mcp_agent.core.request_params import RequestParams
- from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-
- # Handle circular imports
- if TYPE_CHECKING:
-     from mcp_agent.core.types import ProxyDict, WorkflowType
- else:
-     # Define minimal versions for runtime
-     from typing import Any
-
-     # Use Any for runtime to avoid circular imports
-     WorkflowType = Any
-     ProxyDict = Dict[str, "BaseAgentProxy"]
-
-
- class BaseAgentProxy:
-     """Base class for all proxy types"""
-
-     def __init__(self, app: MCPApp, name: str) -> None:
-         self._app = app
-         self._name = name
-
-     async def __call__(self, message: Optional[str] = None) -> str:
-         """Allow: agent.researcher('message') or just agent.researcher()"""
-         if message is None:
-             # When called with no arguments, use prompt() to open the interactive interface
-             return await self.prompt()
-         return await self.send(message)
-
-     async def send(self, message: Optional[Union[str, PromptMessageMultipart]] = None) -> str:
-         """
-         Allow: agent.researcher.send('message') or agent.researcher.send(Prompt.user('message'))
-
-         Args:
-             message: Either a string message or a PromptMessageMultipart object
-
-         Returns:
-             The agent's response as a string
-         """
-         if message is None:
-             # For consistency with agent(), use prompt() to open the interactive interface
-             return await self.prompt()
-
-         # If a PromptMessageMultipart is passed, use send_prompt
-         if isinstance(message, PromptMessageMultipart):
-             return await self.send_prompt(message)
-
-         # For string messages, use generate_str (traditional behavior)
-         return await self.generate_str(message)
-
-     async def prompt(self, default_prompt: str = "") -> str:
-         """Allow: agent.researcher.prompt()"""
-         from mcp_agent.core.agent_app import AgentApp
-
-         # First check if _app is directly an AgentApp
-         if isinstance(self._app, AgentApp):
-             return await self._app.prompt(self._name, default_prompt)
-
-         # If not, check if it's an MCPApp with an _agent_app attribute
-         if hasattr(self._app, "_agent_app"):
-             agent_app = self._app._agent_app
-             if agent_app:
-                 return await agent_app.prompt(self._name, default_prompt)
-
-         # If we can't find an AgentApp, return an error message
-         return "ERROR: Cannot prompt() - AgentApp not found"
-
-     async def generate_str(self, message: str) -> str:
-         """Generate response for a message - must be implemented by subclasses"""
-         raise NotImplementedError("Subclasses must implement generate_str")
-
-     async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
-         """Send a message to the agent and return the response"""
-         raise NotImplementedError("Subclasses must implement send(prompt)")
-
-     async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
-         """
-         Apply a Prompt from an MCP Server - implemented by subclasses.
-         This is the preferred method for applying prompts.
-         Always returns an Assistant message.
-
-         Args:
-             prompt_name: Name of the prompt to apply
-             arguments: Optional dictionary of string arguments for prompt templating
-         """
-         raise NotImplementedError("Subclasses must implement apply_prompt")
-
-
- class LLMAgentProxy(BaseAgentProxy):
-     """Proxy for regular agents that use _llm.generate_str()"""
-
-     def __init__(self, app: MCPApp, name: str, agent: Agent) -> None:
-         super().__init__(app, name)
-         self._agent = agent
-
-     async def generate_str(self, message: str, **kwargs) -> str:
-         """Forward message and all kwargs to the agent's LLM"""
-         return await self._agent._llm.generate_str(message, **kwargs)
-
-     async def send_prompt(self, prompt: PromptMessageMultipart) -> str:
-         """Send a message to the agent and return the response"""
-         return await self._agent._llm.generate_prompt(prompt, None)
-
-     async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
-         """
-         Apply a prompt from an MCP server.
-         This is the preferred method for applying prompts.
-
-         Args:
-             prompt_name: Name of the prompt to apply
-             arguments: Optional dictionary of string arguments for prompt templating
-
-         Returns:
-             The assistant's response
-         """
-         return await self._agent.apply_prompt(prompt_name, arguments)
-
-     # Add the new methods
-     async def get_embedded_resources(self, server_name: str, resource_name: str) -> List[EmbeddedResource]:
-         """
-         Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
-
-         Args:
-             server_name: Name of the MCP server to retrieve the resource from
-             resource_name: Name or URI of the resource to retrieve
-
-         Returns:
-             List of EmbeddedResource objects ready to use in a PromptMessageMultipart
-         """
-         return await self._agent.get_embedded_resources(server_name, resource_name)
-
-     async def with_resource(
-         self,
-         prompt_content: Union[str, PromptMessageMultipart],
-         server_name: str,
-         resource_name: str,
-     ) -> str:
-         """
-         Create a prompt with the given content and resource, then send it to the agent.
-
-         Args:
-             prompt_content: Either a string message or an existing PromptMessageMultipart
-             server_name: Name of the MCP server to retrieve the resource from
-             resource_name: Name or URI of the resource to retrieve
-
-         Returns:
-             The agent's response as a string
-         """
-         return await self._agent.with_resource(prompt_content, server_name, resource_name)
-
-     async def apply_prompt_messages(
-         self,
-         multipart_messages: List["PromptMessageMultipart"],
-         request_params: RequestParams | None = None,
-     ) -> str:
-         """
-         Apply a list of PromptMessageMultipart messages directly to the LLM.
-         This is a cleaner interface to _apply_prompt_template_provider_specific.
-
-         Args:
-             multipart_messages: List of PromptMessageMultipart objects
-             request_params: Optional parameters to configure the LLM request
-
-         Returns:
-             String representation of the assistant's response
-         """
-         # Delegate to the provider-specific implementation
-         return await self._agent._llm._apply_prompt_template_provider_specific(multipart_messages, request_params)
-
-
- class WorkflowProxy(BaseAgentProxy):
-     """Proxy for workflow types that implement generate_str() directly"""
-
-     def __init__(self, app: MCPApp, name: str, workflow: WorkflowType) -> None:
-         super().__init__(app, name)
-         self._workflow = workflow
-
-     async def generate_str(self, message: str, **kwargs) -> str:
-         """Forward message and all kwargs to the underlying workflow"""
-         return await self._workflow.generate_str(message, **kwargs)
-
-
- class RouterProxy(BaseAgentProxy):
-     """Proxy for LLM Routers"""
-
-     def __init__(self, app: MCPApp, name: str, workflow: WorkflowType) -> None:
-         super().__init__(app, name)
-         self._workflow = workflow
-
-     async def generate_str(self, message: str, **kwargs) -> str:
-         """
-         Route the message and forward kwargs to the resulting agent if applicable.
-         Note: For now, route() itself doesn't accept kwargs.
-         """
-         results = await self._workflow.route(message)
-         if not results:
-             return "No appropriate route found for the request."
-
-         # Get the top result
-         top_result = results[0]
-         if isinstance(top_result.result, Agent):
-             # Agent route - delegate to the agent, passing along kwargs
-             agent = top_result.result
-             return await agent._llm.generate_str(message, **kwargs)
-         elif isinstance(top_result.result, str):
-             # Server route - use the router directly
-             return "Tool call requested by router - not yet supported"
-
-         return f"Routed to: {top_result.result} ({top_result.confidence}): {top_result.reasoning}"
-
-
- class ChainProxy(BaseAgentProxy):
-     """Proxy for chained agent operations"""
-
-     def __init__(self, app: MCPApp, name: str, sequence: List[str], agent_proxies: ProxyDict) -> None:
-         super().__init__(app, name)
-         self._sequence = sequence
-         self._agent_proxies = agent_proxies
-         self._continue_with_final = True  # Default behavior
-         self._cumulative = False  # Default to sequential chaining
-
-     async def generate_str(self, message: str, **kwargs) -> str:
-         """Chain message through a sequence of agents.
-
-         For the first agent in the chain, pass all kwargs to maintain transparency.
-
-         Two modes of operation:
-         1. Sequential (default): Each agent receives only the output of the previous agent
-         2. Cumulative: Each agent receives all previous agent responses concatenated
-         """
-         if not self._sequence:
-             return message
-
-         # Process the first agent (same for both modes)
-         first_agent = self._sequence[0]
-         first_proxy = self._agent_proxies[first_agent]
-         first_response = await first_proxy.generate_str(message, **kwargs)
-
-         if len(self._sequence) == 1:
-             return first_response
-
-         if self._cumulative:
-             # Cumulative mode: each agent gets all previous responses
-             cumulative_response = f'<fastagent:response agent="{first_agent}">\n{first_response}\n</fastagent:response>'
-
-             # Process subsequent agents with cumulative results
-             for agent_name in self._sequence[1:]:
-                 proxy = self._agent_proxies[agent_name]
-                 # Pass all previous responses to next agent
-                 agent_response = await proxy.generate_str(cumulative_response)
-                 # Add this agent's response to the cumulative result
-                 cumulative_response += f'\n\n<fastagent:response agent="{agent_name}">\n{agent_response}\n</fastagent:response>'
-
-             return cumulative_response
-         else:
-             # Sequential chaining (original behavior)
-             current_message = first_response
-
-             # For subsequent agents, just pass the message from previous agent
-             for agent_name in self._sequence[1:]:
-                 proxy = self._agent_proxies[agent_name]
-                 current_message = await proxy.generate_str(current_message)
-
-             return current_message
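
For context, a sketch of the calling convention this proxy layer exposed, pieced together from the docstrings above; "researcher" is a hypothetical agent name, fast is a FastAgent instance as in the example files later in this diff, and Prompt.user is the multipart helper the send() docstring refers to:

    async with fast.run() as agent:
        reply = await agent.researcher("Summarise the findings")   # __call__ falls through to send()
        reply = await agent.researcher.send(Prompt.user("hello"))  # multipart message path
        await agent.researcher.prompt()                            # opens the interactive interface
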
mcp_agent/core/types.py DELETED
@@ -1,22 +0,0 @@
- """
- Type definitions for fast-agent core module.
- """
-
- from typing import TYPE_CHECKING, Dict, TypeAlias, Union
-
- from mcp_agent.agents.agent import Agent
- from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
-     EvaluatorOptimizerLLM,
- )
- from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
- from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
- from mcp_agent.workflows.router.router_llm import LLMRouter
-
- # Avoid circular imports
- if TYPE_CHECKING:
-     from mcp_agent.core.proxies import BaseAgentProxy
-
- # Type aliases for better readability
- WorkflowType: TypeAlias = Union[Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter]
- AgentOrWorkflow: TypeAlias = Union[Agent, WorkflowType]
- ProxyDict: TypeAlias = Dict[str, "BaseAgentProxy"]  # Forward reference as string
mcp_agent/mcp/stdio.py DELETED
@@ -1,114 +0,0 @@
- """
- Custom implementation of stdio_client that handles stderr through rich console.
- """
-
- import subprocess
- from contextlib import asynccontextmanager
- from typing import TYPE_CHECKING
-
- import anyio
- import mcp.types as types
- from anyio.streams.text import TextReceiveStream
- from mcp.client.stdio import StdioServerParameters, get_default_environment
-
- from mcp_agent.logging.logger import get_logger
-
- if TYPE_CHECKING:
-     from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
-
- logger = get_logger(__name__)
-
-
- # TODO this will be removed when client library with https://github.com/modelcontextprotocol/python-sdk/pull/343 is released
- @asynccontextmanager
- async def stdio_client_with_rich_stderr(server: StdioServerParameters):
-     """
-     Modified version of stdio_client that captures stderr and routes it through our rich console.
-     Follows the original pattern closely for reliability.
-
-     Args:
-         server: The server parameters for the stdio connection
-     """
-     read_stream: MemoryObjectReceiveStream[types.JSONRPCMessage | Exception]
-     read_stream_writer: MemoryObjectSendStream[types.JSONRPCMessage | Exception]
-
-     write_stream: MemoryObjectSendStream[types.JSONRPCMessage]
-     write_stream_reader: MemoryObjectReceiveStream[types.JSONRPCMessage]
-
-     read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
-     write_stream, write_stream_reader = anyio.create_memory_object_stream(0)
-     # Open process with stderr piped for capture
-
-     process = await anyio.open_process(
-         [server.command, *server.args],
-         env=server.env if server.env is not None else get_default_environment(),
-         stderr=subprocess.PIPE,
-     )
-
-     if process.pid:
-         logger.debug(f"Started process '{server.command}' with PID: {process.pid}")
-
-     if process.returncode is not None:
-         logger.debug(f"return code (early){process.returncode}")
-         raise RuntimeError(f"Process terminated immediately with code {process.returncode}")
-
-     async def stdout_reader() -> None:
-         assert process.stdout, "Opened process is missing stdout"
-         try:
-             async with read_stream_writer:
-                 buffer = ""
-                 async for chunk in TextReceiveStream(
-                     process.stdout,
-                     encoding=server.encoding,
-                     errors=server.encoding_error_handler,
-                 ):
-                     lines = (buffer + chunk).split("\n")
-                     buffer = lines.pop()
-
-                     for line in lines:
-                         if not line:
-                             continue
-                         try:
-                             message = types.JSONRPCMessage.model_validate_json(line)
-                         except Exception as exc:
-                             await read_stream_writer.send(exc)
-                             continue
-
-                         await read_stream_writer.send(message)
-         except anyio.ClosedResourceError:
-             await anyio.lowlevel.checkpoint()
-
-     # async def stderr_reader():
-     #     assert process.stderr, "Opened process is missing stderr"
-     #     try:
-     #         async for chunk in TextReceiveStream(
-     #             process.stderr,
-     #             encoding=server.encoding,
-     #             errors=server.encoding_error_handler,
-     #         ):
-     #             if chunk.strip():
-     #                 # Let the logging system handle the formatting consistently
-     #                 logger.event("info", "mcpserver.stderr", chunk.rstrip(), None, {})
-     #     except anyio.ClosedResourceError:
-     #         await anyio.lowlevel.checkpoint()
-
-     async def stdin_writer() -> None:
-         assert process.stdin, "Opened process is missing stdin"
-         try:
-             async with write_stream_reader:
-                 async for message in write_stream_reader:
-                     json = message.model_dump_json(by_alias=True, exclude_none=True)
-                     await process.stdin.send(
-                         (json + "\n").encode(
-                             encoding=server.encoding,
-                             errors=server.encoding_error_handler,
-                         )
-                     )
-         except anyio.ClosedResourceError:
-             await anyio.lowlevel.checkpoint()
-
-     # Use context managers to handle cleanup automatically
-     async with anyio.create_task_group() as tg, process:
-         tg.start_soon(stdout_reader)
-         tg.start_soon(stdin_writer)
-         yield read_stream, write_stream
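
For context, a sketch of how this removed helper was consumed; the server command and args are illustrative, and StdioServerParameters is the MCP SDK type imported above:

    params = StdioServerParameters(command="uvx", args=["mcp-server-fetch"])
    async with stdio_client_with_rich_stderr(params) as (read_stream, write_stream):
        ...  # hand the streams to an mcp.ClientSession
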
mcp_agent/resources/examples/data-analysis/analysis-campaign.py DELETED
@@ -1,188 +0,0 @@
- import asyncio
-
- from mcp_agent.core.fastagent import FastAgent
- from mcp_agent.workflows.llm.augmented_llm import RequestParams
-
- # Create the application
- fast = FastAgent("Data Analysis & Campaign Generator")
-
-
- # Original data analysis components
- @fast.agent(
-     name="data_analysis",
-     instruction="""
- You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
- Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
- You can add further packages if needed.
- Data files are accessible from the /mnt/data/ directory (this is the current working directory).
- Visualisations should be saved as .png files in the current working directory.
- Extract key insights that would be compelling for a social media campaign.
- """,
-     servers=["interpreter"],
-     request_params=RequestParams(maxTokens=8192),
-     model="sonnet",
- )
- @fast.agent(
-     "evaluator",
-     """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
- You must make sure that the tool has:
- - Considered the best way for a Human to interpret the data
- - Produced insightful visualisations.
- - Provided a high level summary report for the Human.
- - Has had its findings challenged, and justified
- - Extracted compelling insights suitable for social media promotion
- """,
-     request_params=RequestParams(maxTokens=8192),
-     model="gpt-4o",
- )
- @fast.evaluator_optimizer(
-     "analysis_tool",
-     generator="data_analysis",
-     evaluator="evaluator",
-     max_refinements=3,
-     min_rating="EXCELLENT",
- )
- # Research component using Brave search
- @fast.agent(
-     "context_researcher",
-     """You are a research specialist who provides cultural context for different regions.
- For any given data insight and target language/region, research:
- 1. Cultural sensitivities related to presenting this type of data
- 2. Local social media trends and preferences
- 3. Region-specific considerations for marketing campaigns
-
- Always provide actionable recommendations for adapting content to each culture.
- """,
-     servers=["fetch", "brave"],  # Using the fetch MCP server for Brave search
-     request_params=RequestParams(temperature=0.3),
-     model="gpt-4o",
- )
- # Social media content generator
- @fast.agent(
-     "campaign_generator",
-     """Generate engaging social media content based on data insights.
- Create compelling, shareable content that:
- - Highlights key research findings in an accessible way
- - Uses appropriate tone for the platform (Twitter/X, LinkedIn, Instagram, etc.)
- - Is concise and impactful
- - Includes suggested hashtags and posting schedule
-
- Format your response with clear sections for each platform.
- Save different campaign elements as separate files in the current directory.
- """,
-     servers=["filesystem"],  # Using filesystem MCP server to save files
-     request_params=RequestParams(temperature=0.7),
-     model="sonnet",
-     use_history=False,
- )
- # Translation agents with cultural adaptation
- @fast.agent(
-     "translate_fr",
-     """Translate social media content to French with cultural adaptation.
- Consider French cultural norms, expressions, and social media preferences.
- Ensure the translation maintains the impact of the original while being culturally appropriate.
- Save the translated content to a file with appropriate naming.
- """,
-     model="haiku",
-     use_history=False,
-     servers=["filesystem"],
- )
- @fast.agent(
-     "translate_es",
-     """Translate social media content to Spanish with cultural adaptation.
- Consider Spanish-speaking cultural contexts, expressions, and social media preferences.
- Ensure the translation maintains the impact of the original while being culturally appropriate.
- Save the translated content to a file with appropriate naming.
- """,
-     model="haiku",
-     use_history=False,
-     servers=["filesystem"],
- )
- @fast.agent(
-     "translate_de",
-     """Translate social media content to German with cultural adaptation.
- Consider German cultural norms, expressions, and social media preferences.
- Ensure the translation maintains the impact of the original while being culturally appropriate.
- Save the translated content to a file with appropriate naming.
- """,
-     model="haiku",
-     use_history=False,
-     servers=["filesystem"],
- )
- @fast.agent(
-     "translate_ja",
-     """Translate social media content to Japanese with cultural adaptation.
- Consider Japanese cultural norms, expressions, and social media preferences.
- Ensure the translation maintains the impact of the original while being culturally appropriate.
- Save the translated content to a file with appropriate naming.
- """,
-     model="haiku",
-     use_history=False,
-     servers=["filesystem"],
- )
- # Parallel workflow for translations
- @fast.parallel(
-     "translate_campaign",
-     instruction="Translates content to French, Spanish, German and Japanese. Supply the content to translate, translations will be saved to the filesystem.",
-     fan_out=["translate_fr", "translate_es", "translate_de", "translate_ja"],
-     include_request=True,
- )
- # Cultural sensitivity review agent
- @fast.agent(
-     "cultural_reviewer",
-     """Review all translated content for cultural sensitivity and appropriateness.
- For each language version, evaluate:
- - Cultural appropriateness
- - Potential misunderstandings or sensitivities
- - Effectiveness for the target culture
-
- Provide specific recommendations for any needed adjustments and save a review report.
- """,
-     servers=["filesystem"],
-     request_params=RequestParams(temperature=0.2),
- )
- # Campaign optimization workflow
- @fast.evaluator_optimizer(
-     "campaign_optimizer",
-     generator="campaign_generator",
-     evaluator="cultural_reviewer",
-     max_refinements=2,
-     min_rating="EXCELLENT",
- )
- # Main workflow orchestration
- @fast.orchestrator(
-     "research_campaign_creator",
-     instruction="""
- Create a complete multi-lingual social media campaign based on data analysis results.
- The workflow will:
- 1. Analyze the provided data and extract key insights
- 2. Research cultural contexts for target languages
- 3. Generate appropriate social media content
- 4. Translate and culturally adapt the content
- 5. Review and optimize all materials
- 6. Save all campaign elements to files
- """,
-     agents=[
-         "analysis_tool",
-         "context_researcher",
-         "campaign_optimizer",
-         "translate_campaign",
-     ],
-     model="sonnet",  # Using a more capable model for orchestration
-     request_params=RequestParams(maxTokens=8192),
-     plan_type="full",
- )
- async def main() -> None:
-     # Use the app's context manager
-     print(
-         "WARNING: This workflow will likely run for >10 minutes and consume a lot of tokens. Press Enter to accept the default prompt and proceed"
-     )
-
-     async with fast.run() as agent:
-         await agent.research_campaign_creator.prompt(
-             default_prompt="Analyze the CSV file in the current directory and create a comprehensive multi-lingual social media campaign based on the findings. Save all campaign elements as separate files."
-         )
-
-
- if __name__ == "__main__":
-     asyncio.run(main())
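
The evaluator_optimizer pairings in this example loop a generator agent against an evaluator until the evaluator's rating reaches min_rating or max_refinements is exhausted. A rough control-flow sketch, not the library's implementation; rating_of and feedback_of are hypothetical helpers:

    draft = await generator.generate_str(task)
    for _ in range(max_refinements):
        verdict = await evaluator.generate_str(draft)  # returns a rating plus feedback
        if rating_of(verdict) >= min_rating:           # e.g. "EXCELLENT"
            break
        draft = await generator.generate_str(task + feedback_of(verdict))
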
mcp_agent/resources/examples/data-analysis/analysis.py DELETED
@@ -1,65 +0,0 @@
- import asyncio
-
- from mcp_agent.core.fastagent import FastAgent
- from mcp_agent.workflows.llm.augmented_llm import RequestParams
-
- # Create the application
- fast = FastAgent("Data Analysis (Roots)")
-
-
- # The sample data is under Database Contents License (DbCL) v1.0.
- # Available here : https://www.kaggle.com/datasets/pavansubhasht/ibm-hr-analytics-attrition-dataset
-
-
- @fast.agent(
-     name="data_analysis",
-     instruction="""
- You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
- Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
- You can add further packages if needed.
- Data files are accessible from the /mnt/data/ directory (this is the current working directory).
- Visualisations should be saved as .png files in the current working directory.
- """,
-     servers=["interpreter"],
-     request_params=RequestParams(maxTokens=8192),
- )
- async def main() -> None:
-     # Use the app's context manager
-     async with fast.run() as agent:
-         await agent(
-             "There is a csv file in the current directory. "
-             "Analyse the file, produce a detailed description of the data, and any patterns it contains.",
-         )
-         await agent(
-             "Consider the data, and how to usefully group it for presentation to a Human. Find insights, using the Python Interpreter as needed.\n"
-             "Use MatPlotLib to produce insightful visualisations. Save them as '.png' files in the current directory. Be sure to run the code and save the files.\n"
-             "Produce a summary with major insights to the data",
-         )
-         await agent()
-
-
- if __name__ == "__main__":
-     asyncio.run(main())
-
-
- ############################################################################################################
- # Example of evaluator/optimizer flow
- ############################################################################################################
- # @fast.agent(
- #     "evaluator",
- #     """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
- #     You must make sure that the tool has:
- #     - Considered the best way for a Human to interpret the data
- #     - Produced insightful visualisations.
- #     - Provided a high level summary report for the Human.
- #     - Has had its findings challenged, and justified
- #     """,
- #     request_params=RequestParams(maxTokens=8192),
- # )
- # @fast.evaluator_optimizer(
- #     "analysis_tool",
- #     generator="data_analysis",
- #     evaluator="evaluator",
- #     max_refinements=3,
- #     min_rating="EXCELLENT",
- # )