fast-agent-mcp 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +59 -371
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +3 -1
  14. mcp_agent/cli/commands/bootstrap.py +18 -7
  15. mcp_agent/cli/commands/setup.py +12 -4
  16. mcp_agent/cli/main.py +1 -1
  17. mcp_agent/cli/terminal.py +1 -1
  18. mcp_agent/config.py +24 -35
  19. mcp_agent/context.py +3 -1
  20. mcp_agent/context_dependent.py +3 -1
  21. mcp_agent/core/agent_types.py +10 -7
  22. mcp_agent/core/direct_agent_app.py +179 -0
  23. mcp_agent/core/direct_decorators.py +443 -0
  24. mcp_agent/core/direct_factory.py +476 -0
  25. mcp_agent/core/enhanced_prompt.py +15 -20
  26. mcp_agent/core/fastagent.py +151 -337
  27. mcp_agent/core/interactive_prompt.py +424 -0
  28. mcp_agent/core/mcp_content.py +19 -11
  29. mcp_agent/core/prompt.py +6 -2
  30. mcp_agent/core/validation.py +89 -16
  31. mcp_agent/executor/decorator_registry.py +6 -2
  32. mcp_agent/executor/temporal.py +35 -11
  33. mcp_agent/executor/workflow_signal.py +8 -2
  34. mcp_agent/human_input/handler.py +3 -1
  35. mcp_agent/llm/__init__.py +2 -0
  36. mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
  37. mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
  38. mcp_agent/llm/augmented_llm_playback.py +83 -0
  39. mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
  40. mcp_agent/llm/providers/__init__.py +8 -0
  41. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
  42. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
  43. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  44. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
  45. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
  46. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
  47. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
  48. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
  49. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
  50. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
  51. mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
  52. mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
  53. mcp_agent/logging/logger.py +2 -2
  54. mcp_agent/mcp/gen_client.py +9 -3
  55. mcp_agent/mcp/interfaces.py +67 -45
  56. mcp_agent/mcp/logger_textio.py +97 -0
  57. mcp_agent/mcp/mcp_agent_client_session.py +12 -4
  58. mcp_agent/mcp/mcp_agent_server.py +3 -1
  59. mcp_agent/mcp/mcp_aggregator.py +124 -93
  60. mcp_agent/mcp/mcp_connection_manager.py +21 -7
  61. mcp_agent/mcp/prompt_message_multipart.py +59 -1
  62. mcp_agent/mcp/prompt_render.py +77 -0
  63. mcp_agent/mcp/prompt_serialization.py +20 -13
  64. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  65. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  66. mcp_agent/mcp/prompts/prompt_load.py +15 -5
  67. mcp_agent/mcp/prompts/prompt_server.py +154 -87
  68. mcp_agent/mcp/prompts/prompt_template.py +26 -35
  69. mcp_agent/mcp/resource_utils.py +3 -1
  70. mcp_agent/mcp/sampling.py +24 -15
  71. mcp_agent/mcp_server/agent_server.py +8 -5
  72. mcp_agent/mcp_server_registry.py +22 -9
  73. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
  74. mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
  75. mcp_agent/resources/examples/internal/agent.py +4 -2
  76. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  77. mcp_agent/resources/examples/prompting/image_server.py +3 -1
  78. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  79. mcp_agent/ui/console_display.py +27 -7
  80. fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
  81. mcp_agent/core/agent_app.py +0 -570
  82. mcp_agent/core/agent_utils.py +0 -69
  83. mcp_agent/core/decorators.py +0 -448
  84. mcp_agent/core/factory.py +0 -422
  85. mcp_agent/core/proxies.py +0 -278
  86. mcp_agent/core/types.py +0 -22
  87. mcp_agent/eval/__init__.py +0 -0
  88. mcp_agent/mcp/stdio.py +0 -114
  89. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  90. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  91. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  92. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  93. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  94. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  95. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  96. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
  97. mcp_agent/resources/examples/researcher/researcher.py +0 -39
  98. mcp_agent/resources/examples/workflows/chaining.py +0 -45
  99. mcp_agent/resources/examples/workflows/evaluator.py +0 -79
  100. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  101. mcp_agent/resources/examples/workflows/human_input.py +0 -26
  102. mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
  103. mcp_agent/resources/examples/workflows/parallel.py +0 -79
  104. mcp_agent/resources/examples/workflows/router.py +0 -54
  105. mcp_agent/resources/examples/workflows/sse.py +0 -23
  106. mcp_agent/telemetry/__init__.py +0 -0
  107. mcp_agent/telemetry/usage_tracking.py +0 -19
  108. mcp_agent/workflows/__init__.py +0 -0
  109. mcp_agent/workflows/embedding/__init__.py +0 -0
  110. mcp_agent/workflows/embedding/embedding_base.py +0 -58
  111. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  112. mcp_agent/workflows/embedding/embedding_openai.py +0 -37
  113. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  114. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
  115. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  116. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
  117. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
  118. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
  119. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
  120. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
  121. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  122. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
  123. mcp_agent/workflows/llm/__init__.py +0 -0
  124. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
  125. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  126. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  127. mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
  128. mcp_agent/workflows/parallel/__init__.py +0 -0
  129. mcp_agent/workflows/parallel/fan_in.py +0 -320
  130. mcp_agent/workflows/parallel/fan_out.py +0 -181
  131. mcp_agent/workflows/parallel/parallel_llm.py +0 -149
  132. mcp_agent/workflows/router/__init__.py +0 -0
  133. mcp_agent/workflows/router/router_base.py +0 -338
  134. mcp_agent/workflows/router/router_embedding.py +0 -226
  135. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  136. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  137. mcp_agent/workflows/router/router_llm.py +0 -304
  138. mcp_agent/workflows/swarm/__init__.py +0 -0
  139. mcp_agent/workflows/swarm/swarm.py +0 -292
  140. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  141. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  142. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  143. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  144. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  145. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
  146. /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
  147. /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
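
The bulk of this release is a package restructure: mcp_agent.workflows.llm collapses into mcp_agent.llm (with provider-specific modules under mcp_agent.llm.providers), and the LLM workflow implementations (chain, parallel, router, orchestrator, evaluator-optimizer) are rewritten as agents under mcp_agent.agents.workflow, while the embedding, intent-classifier, and swarm workflows are removed outright. A minimal migration sketch for downstream imports follows; it assumes the moved modules keep their public class names (AugmentedLLM and ModelFactory are inferred from the file names, not confirmed by this diff).

# Before (fast-agent-mcp 0.1.13):
# from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
# from mcp_agent.workflows.llm.model_factory import ModelFactory

# After (fast-agent-mcp 0.2.0):
from mcp_agent.llm.augmented_llm import AugmentedLLM
from mcp_agent.llm.model_factory import ModelFactory
from mcp_agent.agents.workflow.chain_agent import ChainAgent  # replaces the removed workflows-based chaining
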
mcp_agent/agents/base_agent.py
@@ -0,0 +1,522 @@
+ """
+ Base Agent class that implements the AgentProtocol interface.
+
+ This class provides default implementations of the standard agent methods
+ and delegates operations to an attached AugmentedLLMProtocol instance.
+ """
+
+ import asyncio
+ import uuid
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, TypeVar, Union
+
+ from mcp.types import (
+     CallToolResult,
+     EmbeddedResource,
+     ListToolsResult,
+     ReadResourceResult,
+     TextContent,
+     Tool,
+ )
+ from pydantic import BaseModel
+
+ from mcp_agent.core.agent_types import AgentConfig
+ from mcp_agent.core.exceptions import PromptExitError
+ from mcp_agent.core.prompt import Prompt
+ from mcp_agent.core.request_params import RequestParams
+ from mcp_agent.human_input.types import (
+     HUMAN_INPUT_SIGNAL_NAME,
+     HumanInputCallback,
+     HumanInputRequest,
+     HumanInputResponse,
+ )
+ from mcp_agent.logging.logger import get_logger
+ from mcp_agent.mcp.interfaces import AgentProtocol, AugmentedLLMProtocol
+ from mcp_agent.mcp.mcp_aggregator import MCPAggregator
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+ # Define a TypeVar for models
+ ModelT = TypeVar("ModelT", bound=BaseModel)
+
+ # Define a TypeVar for AugmentedLLM and its subclasses
+ LLM = TypeVar("LLM", bound=AugmentedLLMProtocol)
+
+ HUMAN_INPUT_TOOL_NAME = "__human_input__"
+ if TYPE_CHECKING:
+     from mcp_agent.context import Context
+
+
+ class BaseAgent(MCPAggregator, AgentProtocol):
+     """
+     A base Agent class that implements the AgentProtocol interface.
+
+     This class provides default implementations of the standard agent methods
+     and delegates LLM operations to an attached AugmentedLLMProtocol instance.
+     """
+
+     def __init__(
+         self,
+         config: AgentConfig,
+         functions: Optional[List[Callable]] = None,
+         connection_persistence: bool = True,
+         human_input_callback: Optional[HumanInputCallback] = None,
+         context: Optional["Context"] = None,
+         **kwargs: Dict[str, Any],
+     ) -> None:
+         self.config = config
+
+         super().__init__(
+             context=context,
+             server_names=self.config.servers,
+             connection_persistence=connection_persistence,
+             name=self.config.name,
+             **kwargs,
+         )
+
+         self._context = context
+         self.name = self.config.name
+         self.instruction = self.config.instruction
+         self.functions = functions or []
+         self.executor = self.context.executor if context and hasattr(context, "executor") else None
+         self.logger = get_logger(f"{__name__}.{self.name}")
+
+         # Store the default request params from config
+         self._default_request_params = self.config.default_request_params
+
+         # Initialize the LLM to None (will be set by attach_llm)
+         self._llm: Optional[AugmentedLLMProtocol] = None
+
+         # Map function names to tools
+         self._function_tool_map: Dict[str, Any] = {}
+
+         if not self.config.human_input:
+             self.human_input_callback = None
+         else:
+             self.human_input_callback: Optional[HumanInputCallback] = human_input_callback
+             if not human_input_callback and context and hasattr(context, "human_input_handler"):
+                 self.human_input_callback = context.human_input_handler
+
+     async def initialize(self) -> None:
+         """
+         Initialize the agent and connect to the MCP servers.
+         NOTE: This method is called automatically when the agent is used as an async context manager.
+         """
+         await self.__aenter__()  # This initializes the connection manager and loads the servers
+
+     async def attach_llm(self, llm_factory: Union[Type[LLM], Callable[..., LLM]], **kwargs) -> LLM:
+         """
+         Create an LLM instance for the agent.
+
+         Args:
+             llm_factory: A class or callable that constructs an AugmentedLLM or its subclass.
+                 The factory should accept keyword arguments matching the
+                 AugmentedLLM constructor parameters.
+             **kwargs: Additional keyword arguments to pass to the LLM constructor.
+
+         Returns:
+             An instance of AugmentedLLM or one of its subclasses.
+         """
+
+         self._llm = llm_factory(
+             agent=self, default_request_params=self._default_request_params, **kwargs
+         )
+
+         return self._llm
+
+     async def shutdown(self) -> None:
+         """
+         Shutdown the agent and close all MCP server connections.
+         NOTE: This method is called automatically when the agent is used as an async context manager.
+         """
+         await super().close()
+
+     async def __call__(
+         self,
+         message: Union[str, PromptMessageMultipart] | None = None,
+         agent_name: Optional[str] = None,
+         default_prompt: str = "",
+     ) -> str:
+         """
+         Make the agent callable to send messages or start an interactive prompt.
+
+         Args:
+             message: Optional message to send to the agent
+             agent_name: Optional name of the agent (for consistency with DirectAgentApp)
+             default_prompt: Default message to use in interactive prompt mode
+
+         Returns:
+             The agent's response as a string or the result of the interactive session
+         """
+         if message:
+             return await self.send(message)
+         return await self.prompt(default_prompt=default_prompt)
+
+     async def generate_str(self, message: str, request_params: RequestParams | None) -> str:
+         result: PromptMessageMultipart = await self.generate([Prompt.user(message)], request_params)
+         return result.first_text()
+
+     async def send(self, message: Union[str, PromptMessageMultipart]) -> str:
+         """
+         Send a message to the agent and get a response.
+
+         Args:
+             message: Either a string message or a PromptMessageMultipart object
+
+         Returns:
+             The agent's response as a string
+         """
+
+         # Create a PromptMessageMultipart if we received a string
+         if isinstance(message, str):
+             prompt = Prompt.user(message)
+         else:
+             prompt = message
+
+         # Use the LLM to generate a response
+         response = await self.generate([prompt], None)
+         return response.first_text()
+
+     async def prompt(self, default_prompt: str = "") -> str:
+         """
+         Start an interactive prompt session with the agent.
+
+         Args:
+             default_prompt: The initial prompt to send to the agent
+
+         Returns:
+             The result of the interactive session
+         """
+         ...
+
+     async def request_human_input(self, request: HumanInputRequest) -> str:
+         """
+         Request input from a human user. Pauses the workflow until input is received.
+
+         Args:
+             request: The human input request
+
+         Returns:
+             The input provided by the human
+
+         Raises:
+             TimeoutError: If the timeout is exceeded
+         """
+         if not self.human_input_callback:
+             raise ValueError("Human input callback not set")
+
+         # Generate a unique ID for this request to avoid signal collisions
+         request_id = f"{HUMAN_INPUT_SIGNAL_NAME}_{self.name}_{uuid.uuid4()}"
+         request.request_id = request_id
+         # Use metadata as a dictionary to pass agent name
+         request.metadata = {"agent_name": self.name}
+         self.logger.debug("Requesting human input:", data=request)
+
+         if not self.executor:
+             raise ValueError("No executor available")
+
+         async def call_callback_and_signal() -> None:
+             try:
+                 assert self.human_input_callback is not None
+                 user_input = await self.human_input_callback(request)
+
+                 self.logger.debug("Received human input:", data=user_input)
+                 await self.executor.signal(signal_name=request_id, payload=user_input)
+             except PromptExitError as e:
+                 # Propagate the exit error through the signal system
+                 self.logger.info("User requested to exit session")
+                 await self.executor.signal(
+                     signal_name=request_id,
+                     payload={"exit_requested": True, "error": str(e)},
+                 )
+             except Exception as e:
+                 await self.executor.signal(
+                     request_id, payload=f"Error getting human input: {str(e)}"
+                 )
+
+         asyncio.create_task(call_callback_and_signal())
+
+         self.logger.debug("Waiting for human input signal")
+
+         # Wait for signal (workflow is paused here)
+         result = await self.executor.wait_for_signal(
+             signal_name=request_id,
+             request_id=request_id,
+             workflow_id=request.workflow_id,
+             signal_description=request.description or request.prompt,
+             timeout_seconds=request.timeout_seconds,
+             signal_type=HumanInputResponse,
+         )
+
+         if isinstance(result, dict) and result.get("exit_requested", False):
+             raise PromptExitError(result.get("error", "User requested to exit FastAgent session"))
+         self.logger.debug("Received human input signal", data=result)
+         return result
+
+     async def list_tools(self) -> ListToolsResult:
+         """
+         List all tools available to this agent.
+
+         Returns:
+             ListToolsResult with available tools
+         """
+         if not self.initialized:
+             await self.initialize()
+
+         result = await super().list_tools()
+
+         if not self.human_input_callback:
+             return result
+
+         # Add a human_input_callback as a tool
+         from mcp.server.fastmcp.tools import Tool as FastTool
+
+         human_input_tool: FastTool = FastTool.from_function(self.request_human_input)
+         result.tools.append(
+             Tool(
+                 name=HUMAN_INPUT_TOOL_NAME,
+                 description=human_input_tool.description,
+                 inputSchema=human_input_tool.parameters,
+             )
+         )
+
+         return result
+
+     async def call_tool(self, name: str, arguments: Dict[str, Any] | None = None) -> CallToolResult:
+         """
+         Call a tool by name with the given arguments.
+
+         Args:
+             name: Name of the tool to call
+             arguments: Arguments to pass to the tool
+
+         Returns:
+             Result of the tool call
+         """
+         if name == HUMAN_INPUT_TOOL_NAME:
+             # Call the human input tool
+             return await self._call_human_input_tool(arguments)
+         else:
+             return await super().call_tool(name, arguments)
+
+     async def _call_human_input_tool(
+         self, arguments: Dict[str, Any] | None = None
+     ) -> CallToolResult:
+         """
+         Handle human input request via tool calling.
+
+         Args:
+             arguments: Tool arguments
+
+         Returns:
+             Result of the human input request
+         """
+         # Handle human input request
+         try:
+             # Make sure arguments is not None
+             if arguments is None:
+                 arguments = {}
+
+             # Extract request data
+             request_data = arguments.get("request")
+
+             # Handle both string and dict request formats
+             if isinstance(request_data, str):
+                 request = HumanInputRequest(prompt=request_data)
+             elif isinstance(request_data, dict):
+                 request = HumanInputRequest(**request_data)
+             else:
+                 # Fallback for invalid or missing request data
+                 request = HumanInputRequest(prompt="Please provide input:")
+
+             result = await self.request_human_input(request=request)
+
+             # Use response attribute if available, otherwise use the result directly
+             response_text = (
+                 result.response if isinstance(result, HumanInputResponse) else str(result)
+             )
+
+             return CallToolResult(
+                 content=[TextContent(type="text", text=f"Human response: {response_text}")]
+             )
+
+         except PromptExitError:
+             raise
+         except asyncio.TimeoutError as e:
+             return CallToolResult(
+                 isError=True,
+                 content=[
+                     TextContent(
+                         type="text",
+                         text=f"Error: Human input request timed out: {str(e)}",
+                     )
+                 ],
+             )
+         except Exception as e:
+             import traceback
+
+             print(f"Error in _call_human_input_tool: {traceback.format_exc()}")
+
+             return CallToolResult(
+                 isError=True,
+                 content=[TextContent(type="text", text=f"Error requesting human input: {str(e)}")],
+             )
+
+     async def apply_prompt(self, prompt_name: str, arguments: Dict[str, str] | None = None) -> str:
+         """
+         Apply an MCP Server Prompt by name and return the assistant's response.
+         Will search all available servers for the prompt if not namespaced.
+
+         If the last message in the prompt is from a user, this will automatically
+         generate an assistant response to ensure we always end with an assistant message.
+
+         Args:
+             prompt_name: The name of the prompt to apply
+             arguments: Optional dictionary of string arguments to pass to the prompt template
+
+         Returns:
+             The assistant's response or error message
+         """
+
+         # Get the prompt - this will search all servers if needed
+         self.logger.debug(f"Loading prompt '{prompt_name}'")
+         prompt_result = await self.get_prompt(prompt_name, arguments)
+
+         if not prompt_result or not prompt_result.messages:
+             error_msg = f"Prompt '{prompt_name}' could not be found or contains no messages"
+             self.logger.warning(error_msg)
+             return error_msg
+
+         # Convert prompt messages to multipart format
+         multipart_messages = PromptMessageMultipart.to_multipart(prompt_result.messages)
+
+         # Always call generate to ensure LLM implementations can handle prompt templates
+         # This is critical for stateful LLMs like PlaybackLLM
+         response = await self.generate(multipart_messages, None)
+         return response.first_text()
+
+     async def get_embedded_resources(
+         self, server_name: str, resource_name: str
+     ) -> List[EmbeddedResource]:
+         """
+         Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
+
+         Args:
+             server_name: Name of the MCP server to retrieve the resource from
+             resource_name: Name or URI of the resource to retrieve
+
+         Returns:
+             List of EmbeddedResource objects ready to use in a PromptMessageMultipart
+
+         Raises:
+             ValueError: If the server doesn't exist or the resource couldn't be found
+         """
+         # Get the raw resource result
+         result: ReadResourceResult = await super().get_resource(server_name, resource_name)
+
+         # Convert each resource content to an EmbeddedResource
+         embedded_resources: List[EmbeddedResource] = []
+         for resource_content in result.contents:
+             embedded_resource = EmbeddedResource(
+                 type="resource", resource=resource_content, annotations=None
+             )
+             embedded_resources.append(embedded_resource)
+
+         return embedded_resources
+
+     async def with_resource(
+         self,
+         prompt_content: Union[str, PromptMessageMultipart],
+         server_name: str,
+         resource_name: str,
+     ) -> str:
+         """
+         Create a prompt with the given content and resource, then send it to the agent.
+
+         Args:
+             prompt_content: Either a string message or an existing PromptMessageMultipart
+             server_name: Name of the MCP server to retrieve the resource from
+             resource_name: Name or URI of the resource to retrieve
+
+         Returns:
+             The agent's response as a string
+         """
+         # Get the embedded resources
+         embedded_resources: List[EmbeddedResource] = await self.get_embedded_resources(
+             server_name, resource_name
+         )
+
+         # Create or update the prompt message
+         prompt: PromptMessageMultipart
+         if isinstance(prompt_content, str):
+             # Create a new prompt with the text and resources
+             content = [TextContent(type="text", text=prompt_content)]
+             content.extend(embedded_resources)
+             prompt = PromptMessageMultipart(role="user", content=content)
+         elif isinstance(prompt_content, PromptMessageMultipart):
+             # Add resources to the existing prompt
+             prompt = prompt_content
+             prompt.content.extend(embedded_resources)
+         else:
+             raise TypeError("prompt_content must be a string or PromptMessageMultipart")
+
+         response: PromptMessageMultipart = await self.generate([prompt], None)
+         return response.first_text()
+
+     async def generate(
+         self,
+         multipart_messages: List[PromptMessageMultipart],
+         request_params: RequestParams | None = None,
+     ) -> PromptMessageMultipart:
+         """
+         Create a completion with the LLM using the provided messages.
+         Delegates to the attached LLM.
+
+         Args:
+             multipart_messages: List of multipart messages to send to the LLM
+             request_params: Optional parameters to configure the request
+
+         Returns:
+             The LLM's response as a PromptMessageMultipart
+         """
+         assert self._llm
+         return await self._llm.generate(multipart_messages, request_params)
+
+     async def structured(
+         self,
+         prompt: List[PromptMessageMultipart],
+         model: Type[ModelT],
+         request_params: RequestParams | None = None,
+     ) -> ModelT | None:
+         """
+         Apply the prompt and return the result as a Pydantic model.
+         Delegates to the attached LLM.
+
+         Args:
+             prompt: List of PromptMessageMultipart objects
+             model: The Pydantic model class to parse the result into
+             request_params: Optional parameters to configure the LLM request
+
+         Returns:
+             An instance of the specified model, or None if coercion fails
+         """
+         assert self._llm
+         return await self._llm.structured(prompt, model, request_params)
+
+     async def apply_prompt_messages(
+         self, prompts: List[PromptMessageMultipart], request_params: RequestParams | None = None
+     ) -> str:
+         """
+         Apply a list of prompt messages and return the result.
+
+         Args:
+             prompts: List of PromptMessageMultipart messages
+             request_params: Optional request parameters
+
+         Returns:
+             The text response from the LLM
+         """
+
+         response = await self.generate(prompts, request_params)
+         return response.first_text()
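
Taken together, the new BaseAgent is driven by constructing it from an AgentConfig, attaching an LLM via attach_llm(), and calling send(). A minimal usage sketch follows; it assumes AgentConfig accepts these fields as keyword arguments and that the augmented_llm_passthrough module listed above exports a PassthroughLLM class (both assumptions, not confirmed by this diff).

import asyncio

from mcp_agent.agents.base_agent import BaseAgent
from mcp_agent.core.agent_types import AgentConfig
from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM  # assumed export


async def main() -> None:
    # AgentConfig field names (name, instruction, servers) are taken from the
    # attribute accesses in BaseAgent.__init__ above; the keyword constructor
    # is an assumption of this sketch.
    config = AgentConfig(name="demo", instruction="You are a helpful agent.", servers=[])

    agent = BaseAgent(config)
    await agent.initialize()  # connects to the (here: empty) list of MCP servers
    try:
        # attach_llm accepts any factory matching the AugmentedLLM constructor.
        await agent.attach_llm(PassthroughLLM)
        print(await agent.send("hello"))  # strings are wrapped via Prompt.user()
    finally:
        await agent.shutdown()


asyncio.run(main())
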
mcp_agent/agents/workflow/__init__.py
@@ -0,0 +1 @@
+ # Workflow agents module
mcp_agent/agents/workflow/chain_agent.py
@@ -0,0 +1,173 @@
+ """
+ Chain workflow implementation using the clean BaseAgent adapter pattern.
+
+ This provides an implementation that delegates operations to a sequence of
+ other agents, chaining their outputs together.
+ """
+
+ from typing import Any, List, Optional, Type
+
+ from mcp.types import TextContent
+
+ from mcp_agent.agents.agent import Agent, AgentConfig
+ from mcp_agent.agents.base_agent import BaseAgent
+ from mcp_agent.core.prompt import Prompt
+ from mcp_agent.core.request_params import RequestParams
+ from mcp_agent.mcp.interfaces import ModelT
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+
+ class ChainAgent(BaseAgent):
+     """
+     A chain agent that processes requests through a series of specialized agents in sequence.
+     Passes the output of each agent to the next agent in the chain.
+     """
+
+     def __init__(
+         self,
+         config: AgentConfig,
+         agents: List[Agent],
+         cumulative: bool = False,
+         context: Optional[Any] = None,
+         **kwargs,
+     ) -> None:
+         """
+         Initialize a ChainAgent.
+
+         Args:
+             config: Agent configuration or name
+             agents: List of agents to chain together in sequence
+             cumulative: Whether each agent sees all previous responses
+             context: Optional context object
+             **kwargs: Additional keyword arguments to pass to BaseAgent
+         """
+         super().__init__(config, context=context, **kwargs)
+         self.agents = agents
+         self.cumulative = cumulative
+
+     async def generate(
+         self,
+         multipart_messages: List[PromptMessageMultipart],
+         request_params: Optional[RequestParams] = None,
+     ) -> PromptMessageMultipart:
+         """
+         Chain the request through multiple agents in sequence.
+
+         Args:
+             multipart_messages: Initial messages to send to the first agent
+             request_params: Optional request parameters
+
+         Returns:
+             The response from the final agent in the chain
+         """
+
+         # Get the original user message (last message in the list)
+         user_message = multipart_messages[-1] if multipart_messages else None
+
+         # If no user message, return an error rather than failing later
+         if not user_message:
+             return PromptMessageMultipart(
+                 role="assistant",
+                 content=[TextContent(type="text", text="No input message provided.")],
+             )
+
+         if not self.cumulative:
+             response: PromptMessageMultipart = await self.agents[0].generate(multipart_messages)
+             # Process the rest of the agents in the chain
+             for agent in self.agents[1:]:
+                 next_message = Prompt.user(response.content)
+                 response = await agent.generate([next_message])
+
+             return response
+
+         # Track all responses in the chain
+         all_responses: List[PromptMessageMultipart] = []
+
+         # Initialize list for storing formatted results
+         final_results: List[str] = []
+
+         # Add the original request with XML tag
+         request_text = f"<fastagent:request>{user_message.all_text()}</fastagent:request>"
+         final_results.append(request_text)
+
+         # Process through each agent in sequence
+         for i, agent in enumerate(self.agents):
+             # In cumulative mode, include the original message and all previous responses
+             chain_messages = multipart_messages.copy()
+             chain_messages.extend(all_responses)
+             current_response = await agent.generate(chain_messages, request_params)
+
+             # Store the response
+             all_responses.append(current_response)
+
+             response_text = current_response.all_text()
+             attributed_response = (
+                 f"<fastagent:response agent='{agent.name}'>{response_text}</fastagent:response>"
+             )
+             final_results.append(attributed_response)
+
+         # For cumulative mode, return the properly formatted output with XML tags
+         response_text = "\n\n".join(final_results)
+         return PromptMessageMultipart(
+             role="assistant",
+             content=[TextContent(type="text", text=response_text)],
+         )
+
+     async def structured(
+         self,
+         prompt: List[PromptMessageMultipart],
+         model: Type[ModelT],
+         request_params: Optional[RequestParams] = None,
+     ) -> Optional[ModelT]:
+         """
+         Chain the request through multiple agents and parse the final response.
+
+         Args:
+             prompt: List of messages to send through the chain
+             model: Pydantic model to parse the final response into
+             request_params: Optional request parameters
+
+         Returns:
+             The parsed response from the final agent, or None if parsing fails
+         """
+         # Generate response through the chain
+         response = await self.generate(prompt, request_params)
+
+         # Let the last agent in the chain try to parse the response
+         if self.agents:
+             last_agent = self.agents[-1]
+             try:
+                 return await last_agent.structured([response], model, request_params)
+             except Exception as e:
+                 self.logger.warning(f"Failed to parse response from chain: {str(e)}")
+                 return None
+         return None
+
+     async def initialize(self) -> None:
+         """
+         Initialize the chain agent and all agents in the chain.
+         """
+         await super().initialize()
+
+         # Initialize all agents in the chain if not already initialized
+         for agent in self.agents:
+             if not getattr(agent, "initialized", False):
+                 await agent.initialize()
+
+     async def shutdown(self) -> None:
+         """
+         Shutdown the chain agent and all agents in the chain.
+         """
+         await super().shutdown()
+
+         # Shutdown all agents in the chain
+         for agent in self.agents:
+             try:
+                 await agent.shutdown()
+             except Exception as e:
+                 self.logger.warning(f"Error shutting down agent in chain: {str(e)}")