fast-agent-mcp 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/METADATA +12 -8
  2. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/RECORD +34 -31
  3. mcp_agent/__init__.py +2 -2
  4. mcp_agent/agents/agent.py +5 -0
  5. mcp_agent/agents/base_agent.py +158 -42
  6. mcp_agent/agents/workflow/chain_agent.py +9 -13
  7. mcp_agent/agents/workflow/evaluator_optimizer.py +3 -3
  8. mcp_agent/agents/workflow/orchestrator_agent.py +9 -7
  9. mcp_agent/agents/workflow/parallel_agent.py +2 -2
  10. mcp_agent/agents/workflow/router_agent.py +7 -5
  11. mcp_agent/core/{direct_agent_app.py → agent_app.py} +115 -15
  12. mcp_agent/core/direct_factory.py +12 -12
  13. mcp_agent/core/fastagent.py +17 -4
  14. mcp_agent/core/mcp_content.py +38 -5
  15. mcp_agent/core/prompt.py +70 -8
  16. mcp_agent/core/validation.py +1 -1
  17. mcp_agent/llm/augmented_llm.py +38 -8
  18. mcp_agent/llm/model_factory.py +17 -27
  19. mcp_agent/llm/providers/augmented_llm_generic.py +5 -4
  20. mcp_agent/llm/providers/augmented_llm_openai.py +3 -3
  21. mcp_agent/llm/providers/multipart_converter_anthropic.py +8 -8
  22. mcp_agent/llm/providers/multipart_converter_openai.py +9 -9
  23. mcp_agent/mcp/helpers/__init__.py +3 -0
  24. mcp_agent/mcp/helpers/content_helpers.py +116 -0
  25. mcp_agent/mcp/interfaces.py +39 -16
  26. mcp_agent/mcp/mcp_aggregator.py +117 -13
  27. mcp_agent/mcp/prompt_message_multipart.py +29 -22
  28. mcp_agent/mcp/prompt_render.py +18 -15
  29. mcp_agent/mcp/prompts/prompt_helpers.py +22 -112
  30. mcp_agent/mcp_server/agent_server.py +2 -2
  31. mcp_agent/resources/examples/internal/history_transfer.py +35 -0
  32. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/WHEEL +0 -0
  33. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/entry_points.txt +0 -0
  34. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/licenses/LICENSE +0 -0
mcp_agent/agents/workflow/orchestrator_agent.py CHANGED
@@ -5,7 +5,7 @@ This workflow provides an implementation that manages complex tasks by
  dynamically planning, delegating to specialized agents, and synthesizing results.
  """
 
- from typing import Any, Dict, List, Literal, Optional, Type
+ from typing import Any, Dict, List, Literal, Optional, Tuple, Type
 
  from mcp.types import TextContent
 
@@ -29,6 +29,7 @@ from mcp_agent.agents.workflow.orchestrator_prompts import (
  )
  from mcp_agent.core.agent_types import AgentConfig
  from mcp_agent.core.exceptions import AgentConfigError
+ from mcp_agent.core.prompt import Prompt
  from mcp_agent.core.request_params import RequestParams
  from mcp_agent.logging.logger import get_logger
  from mcp_agent.mcp.interfaces import ModelT
@@ -117,7 +118,7 @@ class OrchestratorAgent(BaseAgent):
  prompt: List[PromptMessageMultipart],
  model: Type[ModelT],
  request_params: Optional[RequestParams] = None,
- ) -> Optional[ModelT]:
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]:
  """
  Execute an orchestration plan and parse the result into a structured format.
 
@@ -138,12 +139,11 @@
  prompt_message = PromptMessageMultipart(
      role="user", content=[TextContent(type="text", text=result_text)]
  )
-
- # Use the LLM's structured parsing capability
+ assert self._llm
  return await self._llm.structured([prompt_message], model, request_params)
  except Exception as e:
      self.logger.warning(f"Failed to parse orchestration result: {str(e)}")
-     return None
+     return None, Prompt.assistant(f"Failed to parse orchestration result: {str(e)}")
 
  async def initialize(self) -> None:
      """Initialize the orchestrator agent and worker agents."""
@@ -429,7 +429,8 @@
  plan_msg = PromptMessageMultipart(
      role="user", content=[TextContent(type="text", text=prompt)]
  )
- return await self._llm.structured([plan_msg], Plan, request_params)
+ plan, _ = await self._llm.structured([plan_msg], Plan, request_params)
+ return plan
  except Exception as e:
      self.logger.error(f"Failed to parse plan: {str(e)}")
      return None
@@ -483,7 +484,8 @@
  plan_msg = PromptMessageMultipart(
      role="user", content=[TextContent(type="text", text=prompt)]
  )
- return await self._llm.structured([plan_msg], NextStep, request_params)
+ next_step, _ = await self._llm.structured([plan_msg], NextStep, request_params)
+ return next_step
  except Exception as e:
      self.logger.error(f"Failed to parse next step: {str(e)}")
      return None
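
The orchestrator hunks above all follow one API shift: `structured()` now returns a `(parsed_model, assistant_message)` tuple instead of `Optional[ModelT]`, so a parse failure still yields an assistant-role message to show or log. A minimal caller sketch under that assumption; the `WeatherReport` model and the `agent` variable are illustrative, not part of the package:

```python
# Sketch only: `agent` is any fast-agent agent whose structured() follows the
# new (model_or_None, PromptMessageMultipart) return shape shown above.
from typing import Optional

from pydantic import BaseModel

from mcp_agent.core.prompt import Prompt


class WeatherReport(BaseModel):
    """Hypothetical result model used only for illustration."""

    city: str
    temperature_c: float


async def fetch_report(agent) -> Optional[WeatherReport]:
    report, assistant_message = await agent.structured(
        [Prompt.user("Return the weather for Paris as JSON")], WeatherReport
    )
    if report is None:
        # The second element still carries the assistant's raw reply,
        # e.g. the "Failed to parse ..." message added in this release.
        print(assistant_message.content)
    return report
```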
mcp_agent/agents/workflow/parallel_agent.py CHANGED
@@ -1,5 +1,5 @@
  import asyncio
- from typing import Any, List, Optional
+ from typing import Any, List, Optional, Tuple
 
  from mcp.types import TextContent
 
@@ -113,7 +113,7 @@ class ParallelAgent(BaseAgent):
  prompt: List[PromptMessageMultipart],
  model: type[ModelT],
  request_params: Optional[RequestParams] = None,
- ) -> Optional[ModelT]:
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]:
  """
  Apply the prompt and return the result as a Pydantic model.
 
mcp_agent/agents/workflow/router_agent.py CHANGED
@@ -5,7 +5,7 @@ This provides a simplified implementation that routes messages to agents
  by determining the best agent for a request and dispatching to it.
  """
 
- from typing import TYPE_CHECKING, List, Optional, Type
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Type
 
  from mcp.types import TextContent
  from pydantic import BaseModel
@@ -14,6 +14,7 @@ from mcp_agent.agents.agent import Agent
  from mcp_agent.agents.base_agent import BaseAgent
  from mcp_agent.core.agent_types import AgentConfig
  from mcp_agent.core.exceptions import AgentConfigError
+ from mcp_agent.core.prompt import Prompt
  from mcp_agent.core.request_params import RequestParams
  from mcp_agent.logging.logger import get_logger
  from mcp_agent.mcp.interfaces import ModelT
@@ -73,7 +74,7 @@ class RoutingResponse(BaseModel):
  class RouterResult(BaseModel):
      """Router result with agent reference and confidence rating."""
 
-     result: Agent
+     result: BaseAgent
      confidence: str
      reasoning: Optional[str] = None
 
@@ -221,7 +222,7 @@ class RouterAgent(BaseAgent):
  prompt: List[PromptMessageMultipart],
  model: Type[ModelT],
  request_params: Optional[RequestParams] = None,
- ) -> Optional[ModelT]:
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]:
  """
  Route the request to the most appropriate agent and parse its response.
 
@@ -236,7 +237,7 @@
  routing_result = await self._get_routing_result(prompt)
 
  if not routing_result:
-     return None
+     return None, Prompt.assistant("No routing result")
 
  # Get the selected agent
  selected_agent = routing_result.result
@@ -287,7 +288,8 @@
  )
 
  # Get structured response from LLM
- response = await self._llm.structured(
+ assert self._llm
+ response, _ = await self._llm.structured(
      [prompt], RoutingResponse, self._default_request_params
  )
 
mcp_agent/core/{direct_agent_app.py → agent_app.py} CHANGED
@@ -2,14 +2,17 @@
  Direct AgentApp implementation for interacting with agents without proxies.
  """
 
- from typing import Dict, Optional, Union
+ from typing import Dict, List, Optional, Union
+
+ from deprecated import deprecated
+ from mcp.types import PromptMessage
 
  from mcp_agent.agents.agent import Agent
  from mcp_agent.core.interactive_prompt import InteractivePrompt
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 
- class DirectAgentApp:
+ class AgentApp:
  """
  Container for active agents that provides a simple API for interacting with them.
  This implementation works directly with Agent instances without proxies.
@@ -44,7 +47,7 @@
 
  async def __call__(
      self,
-     message: Union[str, PromptMessageMultipart] | None = None,
+     message: Union[str, PromptMessage, PromptMessageMultipart] | None = None,
      agent_name: str | None = None,
      default_prompt: str = "",
  ) -> str:
@@ -53,9 +56,12 @@
  This mirrors the FastAgent implementation that allowed agent("message").
 
  Args:
-     message: The message to send
+     message: Message content in various formats:
+         - String: Converted to a user PromptMessageMultipart
+         - PromptMessage: Converted to PromptMessageMultipart
+         - PromptMessageMultipart: Used directly
      agent_name: Optional name of the agent to send to (defaults to first agent)
-     default: Default message to use in interactive prompt mode
+     default_prompt: Default message to use in interactive prompt mode
 
  Returns:
      The agent's response as a string or the result of the interactive session
@@ -63,14 +69,21 @@
  if message:
      return await self._agent(agent_name).send(message)
 
- return await self._agent(agent_name).prompt(default_prompt=default_prompt)
+ return await self.interactive(agent_name=agent_name, default_prompt=default_prompt)
 
- async def send(self, message: str, agent_name: Optional[str] = None) -> str:
+ async def send(
+     self,
+     message: Union[str, PromptMessage, PromptMessageMultipart],
+     agent_name: Optional[str] = None,
+ ) -> str:
  """
  Send a message to the specified agent (or to all agents).
 
  Args:
-     message: The message to send
+     message: Message content in various formats:
+         - String: Converted to a user PromptMessageMultipart
+         - PromptMessage: Converted to PromptMessageMultipart
+         - PromptMessageMultipart: Used directly
      agent_name: Optional name of the agent to send to
 
  Returns:
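
As the updated docstring describes, `send()` now accepts a plain string, a raw `PromptMessage`, or a `PromptMessageMultipart`. A small usage sketch, assuming an already-constructed `AgentApp` (here called `app`) and an agent registered under the illustrative name "assistant":

```python
# Sketch: the three message shapes AgentApp.send() now accepts.
# `app` is an existing AgentApp; the agent name "assistant" is illustrative.
from mcp.types import PromptMessage, TextContent

from mcp_agent.core.prompt import Prompt


async def demo(app) -> None:
    # Plain string, converted to a user PromptMessageMultipart
    await app.send("hello", agent_name="assistant")

    # Raw MCP PromptMessage, converted to PromptMessageMultipart
    await app.send(
        PromptMessage(role="user", content=TextContent(type="text", text="hello again")),
        agent_name="assistant",
    )

    # PromptMessageMultipart built with the Prompt helper, used directly
    await app.send(Prompt.user("part one", "part two"), agent_name="assistant")
```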
@@ -97,32 +110,119 @@
 
  Args:
      prompt_name: Name of the prompt template to apply
-     agent_name: Name of the agent to send to
      arguments: Optional arguments for the prompt template
+     agent_name: Name of the agent to send to
 
  Returns:
      The agent's response as a string
  """
  return await self._agent(agent_name).apply_prompt(prompt_name, arguments)
 
- async def list_prompts(self, agent_name: str | None = None):
+ async def list_prompts(self, server_name: str | None = None, agent_name: str | None = None):
      """
      List available prompts for an agent.
 
      Args:
+         server_name: Optional name of the server to list prompts from
          agent_name: Name of the agent to list prompts for
 
      Returns:
          Dictionary mapping server names to lists of available prompts
      """
-     return await self._agent(agent_name).list_prompts()
+     return await self._agent(agent_name).list_prompts(server_name=server_name)
+
+ async def get_prompt(
+     self,
+     prompt_name: str,
+     arguments: Dict[str, str] | None = None,
+     server_name: str | None = None,
+     agent_name: str | None = None,
+ ):
+     """
+     Get a prompt from a server.
+
+     Args:
+         prompt_name: Name of the prompt, optionally namespaced
+         arguments: Optional dictionary of arguments to pass to the prompt template
+         server_name: Optional name of the server to get the prompt from
+         agent_name: Name of the agent to use
 
- async def with_resource(self, user_prompt: str, server_name: str, resource_name: str) -> str:
-     return await self._agent(None).with_resource(
-         prompt_content=user_prompt, server_name=server_name, resource_name=resource_name
+     Returns:
+         GetPromptResult containing the prompt information
+     """
+     return await self._agent(agent_name).get_prompt(
+         prompt_name=prompt_name, arguments=arguments, server_name=server_name
      )
 
- async def prompt(self, agent_name: Optional[str] = None, default_prompt: str = "") -> str:
+ async def with_resource(
+     self,
+     prompt_content: Union[str, PromptMessage, PromptMessageMultipart],
+     resource_uri: str,
+     server_name: str | None = None,
+     agent_name: str | None = None,
+ ) -> str:
+     """
+     Send a message with an attached MCP resource.
+
+     Args:
+         prompt_content: Content in various formats (String, PromptMessage, or PromptMessageMultipart)
+         resource_uri: URI of the resource to retrieve
+         server_name: Optional name of the MCP server to retrieve the resource from
+         agent_name: Name of the agent to use
+
+     Returns:
+         The agent's response as a string
+     """
+     return await self._agent(agent_name).with_resource(
+         prompt_content=prompt_content, resource_uri=resource_uri, server_name=server_name
+     )
+
+ async def list_resources(
+     self,
+     server_name: str | None = None,
+     agent_name: str | None = None,
+ ) -> Dict[str, List[str]]:
+     """
+     List available resources from one or all servers.
+
+     Args:
+         server_name: Optional server name to list resources from
+         agent_name: Name of the agent to use
+
+     Returns:
+         Dictionary mapping server names to lists of resource URIs
+     """
+     return await self._agent(agent_name).list_resources(server_name=server_name)
+
+ async def get_resource(
+     self,
+     resource_uri: str,
+     server_name: str | None = None,
+     agent_name: str | None = None,
+ ):
+     """
+     Get a resource from an MCP server.
+
+     Args:
+         resource_uri: URI of the resource to retrieve
+         server_name: Optional name of the MCP server to retrieve the resource from
+         agent_name: Name of the agent to use
+
+     Returns:
+         ReadResourceResult object containing the resource content
+     """
+     return await self._agent(agent_name).get_resource(
+         resource_uri=resource_uri, server_name=server_name
+     )
+
+ @deprecated
+ async def prompt(self, agent_name: str | None = None, default_prompt: str = "") -> str:
+     """
+     Deprecated - use interactive() instead.
+     """
+     return await self.interactive(agent_name=agent_name, default_prompt=default_prompt)
+
+ async def interactive(self, agent_name: str | None = None, default_prompt: str = "") -> str:
  """
  Interactive prompt for sending messages with advanced features.
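
The new pass-through helpers mirror the underlying agent API: prompts and resources can be listed or fetched per server, `with_resource()` takes a `resource_uri` (replacing the old `resource_name`), and `interactive()` supersedes the deprecated `prompt()`. A hedged sketch of how they compose; the server name "docs" and the resource URI are illustrative:

```python
# Sketch: exercising the new AgentApp helpers. `app` is an AgentApp; the
# server name "docs" and the resource URI are placeholders for illustration.
async def explore(app) -> str:
    prompts = await app.list_prompts(server_name="docs")      # {server: [prompts]}
    resources = await app.list_resources(server_name="docs")  # {server: [resource URIs]}
    print(prompts, resources)

    # Fetch a raw resource (ReadResourceResult) without sending it anywhere
    raw = await app.get_resource("resource://docs/readme", server_name="docs")
    print(len(raw.contents))

    # Or attach the resource to a message in one call
    return await app.with_resource(
        "Summarise this document",
        resource_uri="resource://docs/readme",
        server_name="docs",
    )
```

Interactive sessions now go through `await app.interactive()`; `app.prompt()` still works but is marked `@deprecated`.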
 
mcp_agent/core/direct_factory.py CHANGED
@@ -147,11 +147,11 @@ async def create_agents_by_type(
  await agent.initialize()
 
  # Attach LLM to the agent
- llm_factory = model_factory_func(
-     model=config.model,
-     request_params=config.default_request_params,
+ llm_factory = model_factory_func(model=config.model)
+ await agent.attach_llm(
+     llm_factory,
+     request_params=config.default_request_params
  )
- await agent.attach_llm(llm_factory)
  result_agents[name] = agent
 
  elif agent_type == AgentType.ORCHESTRATOR:
@@ -183,11 +183,11 @@ async def create_agents_by_type(
  await orchestrator.initialize()
 
  # Attach LLM to the orchestrator
- llm_factory = model_factory_func(
-     model=config.model,
-     request_params=config.default_request_params,
+ llm_factory = model_factory_func(model=config.model)
+ await orchestrator.attach_llm(
+     llm_factory,
+     request_params=config.default_request_params
  )
- await orchestrator.attach_llm(llm_factory)
 
  result_agents[name] = orchestrator
 
@@ -247,11 +247,11 @@ async def create_agents_by_type(
  await router.initialize()
 
  # Attach LLM to the router
- llm_factory = model_factory_func(
-     model=config.model,
-     request_params=config.default_request_params,
+ llm_factory = model_factory_func(model=config.model)
+ await router.attach_llm(
+     llm_factory,
+     request_params=config.default_request_params
  )
- await router.attach_llm(llm_factory)
  result_agents[name] = router
 
  elif agent_type == AgentType.CHAIN:
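
All three branches make the same change: `default_request_params` no longer goes into the model factory call and instead rides along with `attach_llm()`. A compact sketch of the new wiring, with `agent`, `config`, and `model_factory_func` standing in for the objects built earlier in `create_agents_by_type`:

```python
# Sketch: request params now travel with attach_llm() rather than being baked
# into the LLM factory. All names are stand-ins for objects created earlier
# in create_agents_by_type.
async def wire_llm(agent, config, model_factory_func):
    await agent.initialize()

    # The factory is keyed off the model name alone...
    llm_factory = model_factory_func(model=config.model)

    # ...and the per-agent defaults are handed to attach_llm instead.
    await agent.attach_llm(
        llm_factory,
        request_params=config.default_request_params,
    )
    return agent
```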
mcp_agent/core/fastagent.py CHANGED
@@ -15,7 +15,7 @@ import yaml
  from mcp_agent.app import MCPApp
  from mcp_agent.config import Settings
  from mcp_agent.context import Context
- from mcp_agent.core.direct_agent_app import DirectAgentApp
+ from mcp_agent.core.agent_app import AgentApp
  from mcp_agent.core.direct_decorators import (
      agent as agent_decorator,
  )
@@ -110,8 +110,15 @@ class FastAgent:
 
  self.name = name
  self.config_path = config_path
- self._load_config()
-
+ try:
+     self._load_config()
+ except yaml.parser.ParserError as e:
+     handle_error(
+         e,
+         "YAML Parsing Error",
+         "There was an error parsing the config or secrets YAML configuration file.",
+     )
+     raise SystemExit(1)
  # Create the MCPApp with the config
  self.app = MCPApp(
      name=name,
@@ -196,7 +203,7 @@ class FastAgent:
  )
 
  # Create a wrapper with all agents for simplified access
- wrapper = DirectAgentApp(active_agents)
+ wrapper = AgentApp(active_agents)
 
  # Handle direct message sending if --agent and --message are provided
  if hasattr(self, "args") and self.args.agent and self.args.message:
@@ -297,6 +304,12 @@ class FastAgent:
  e,
  "User requested exit",
  )
+ elif isinstance(e, asyncio.CancelledError):
+     handle_error(
+         e,
+         "Cancelled",
+         "The operation was cancelled.",
+     )
  else:
      handle_error(e, error_type or "Error", "An unexpected error occurred.")
 
mcp_agent/core/mcp_content.py CHANGED
@@ -14,6 +14,8 @@ from mcp.types import (
  BlobResourceContents,
  EmbeddedResource,
  ImageContent,
+ ReadResourceResult,
+ ResourceContents,
  TextContent,
  TextResourceContents,
  )
@@ -25,6 +27,9 @@ from mcp_agent.mcp.mime_utils import (
  is_image_mime_type,
  )
 
+ # Type for all MCP content types
+ MCPContentType = Union[TextContent, ImageContent, EmbeddedResource, ResourceContents]
+
 
  def MCPText(
      text: str,
@@ -147,7 +152,8 @@ def MCPFile(
 
 
  def MCPPrompt(
-     *content_items: Union[dict, str, Path, bytes], role: Literal["user", "assistant"] = "user"
+     *content_items: Union[dict, str, Path, bytes, MCPContentType, 'EmbeddedResource', 'ReadResourceResult'],
+     role: Literal["user", "assistant"] = "user"
  ) -> List[dict]:
      """
      Create one or more prompt messages with various content types.
@@ -158,6 +164,11 @@
  - File paths with text mime types or other mime types become EmbeddedResource
  - Dicts with role and content are passed through unchanged
  - Raw bytes become ImageContent
+ - TextContent objects are used directly
+ - ImageContent objects are used directly
+ - EmbeddedResource objects are used directly
+ - ResourceContent objects are wrapped in EmbeddedResource
+ - ReadResourceResult objects are expanded into multiple messages
 
  Args:
      *content_items: Content items of various types
@@ -173,9 +184,9 @@
  # Already a fully formed message
  result.append(item)
  elif isinstance(item, str):
-     # Simple text content (that's not a file path)
+     # Simple text content
      result.append(MCPText(item, role=role))
- elif isinstance(item, Path) or isinstance(item, str):
+ elif isinstance(item, Path):
      # File path - determine the content type based on mime type
      path_str = str(item)
      mime_type = guess_mime_type(path_str)
@@ -189,6 +200,28 @@
  elif isinstance(item, bytes):
      # Raw binary data, assume image
      result.append(MCPImage(data=item, role=role))
+ elif isinstance(item, TextContent):
+     # Already a TextContent, wrap in a message
+     result.append({"role": role, "content": item})
+ elif isinstance(item, ImageContent):
+     # Already an ImageContent, wrap in a message
+     result.append({"role": role, "content": item})
+ elif isinstance(item, EmbeddedResource):
+     # Already an EmbeddedResource, wrap in a message
+     result.append({"role": role, "content": item})
+ elif hasattr(item, 'type') and item.type == 'resource' and hasattr(item, 'resource'):
+     # Looks like an EmbeddedResource but may not be the exact class
+     result.append({"role": role, "content": EmbeddedResource(type="resource", resource=item.resource)})
+ elif isinstance(item, ResourceContents):
+     # It's a ResourceContents, wrap it in an EmbeddedResource
+     result.append({"role": role, "content": EmbeddedResource(type="resource", resource=item)})
+ elif isinstance(item, ReadResourceResult):
+     # It's a ReadResourceResult, convert each resource content
+     for resource_content in item.contents:
+         result.append({
+             "role": role,
+             "content": EmbeddedResource(type="resource", resource=resource_content)
+         })
  else:
      # Try to convert to string
      result.append(MCPText(str(item), role=role))
@@ -196,12 +229,12 @@
  return result
 
 
- def User(*content_items) -> List[dict]:
+ def User(*content_items: Union[dict, str, Path, bytes, MCPContentType, 'EmbeddedResource', 'ReadResourceResult']) -> List[dict]:
      """Create user message(s) with various content types."""
      return MCPPrompt(*content_items, role="user")
 
 
- def Assistant(*content_items) -> List[dict]:
+ def Assistant(*content_items: Union[dict, str, Path, bytes, MCPContentType, 'EmbeddedResource', 'ReadResourceResult']) -> List[dict]:
      """Create assistant message(s) with various content types."""
      return MCPPrompt(*content_items, role="assistant")
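
With these changes, `MCPPrompt()` (and therefore `User()`/`Assistant()`) can take already-constructed MCP content objects as well as strings, paths, and bytes. A hedged sketch; the URI and text values are made up for illustration:

```python
# Sketch: passing ready-made MCP content objects straight into MCPPrompt().
# The URI and text values are illustrative only.
from mcp.types import (
    EmbeddedResource,
    ReadResourceResult,
    TextContent,
    TextResourceContents,
)

from mcp_agent.core.mcp_content import MCPPrompt

note = TextResourceContents(
    uri="resource://example/notes.txt", mimeType="text/plain", text="some notes"
)

messages = MCPPrompt(
    "plain string",                                    # becomes TextContent
    TextContent(type="text", text="already built"),    # used directly
    EmbeddedResource(type="resource", resource=note),  # used directly
    note,                                              # wrapped in EmbeddedResource
    ReadResourceResult(contents=[note]),               # one message per contained resource
    role="user",
)
assert all(message["role"] == "user" for message in messages)
```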
 
mcp_agent/core/prompt.py CHANGED
@@ -2,14 +2,15 @@
  Prompt class for easily creating and working with MCP prompt content.
  """
 
- from typing import List, Literal
+ from pathlib import Path
+ from typing import List, Literal, Union
 
  from mcp.types import PromptMessage
 
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
  # Import our content helper functions
- from .mcp_content import Assistant, MCPPrompt, User
+ from .mcp_content import Assistant, MCPContentType, MCPPrompt, User
 
 
  class Prompt:
@@ -25,34 +26,75 @@
  - Strings become TextContent
  - Image file paths become ImageContent
  - Other file paths become EmbeddedResource
+ - TextContent objects are used directly
+ - ImageContent objects are used directly
+ - EmbeddedResource objects are used directly
  - Pre-formatted messages pass through unchanged
  """
 
  @classmethod
- def user(cls, *content_items) -> PromptMessageMultipart:
+ def user(cls, *content_items: Union[str, Path, bytes, dict, MCPContentType, PromptMessage, PromptMessageMultipart]) -> PromptMessageMultipart:
      """
      Create a user PromptMessageMultipart with various content items.
 
      Args:
-         *content_items: Content items (strings, file paths, etc.)
+         *content_items: Content items in various formats:
+             - Strings: Converted to TextContent
+             - Path objects: Converted based on file type (image/text/binary)
+             - Bytes: Treated as image data
+             - Dicts with role/content: Content extracted
+             - TextContent: Used directly
+             - ImageContent: Used directly
+             - EmbeddedResource: Used directly
+             - PromptMessage: Content extracted
+             - PromptMessageMultipart: Content extracted with role changed to user
 
      Returns:
          A PromptMessageMultipart with user role and the specified content
      """
+     # Handle PromptMessage and PromptMessageMultipart directly
+     if len(content_items) == 1:
+         item = content_items[0]
+         if isinstance(item, PromptMessage):
+             return PromptMessageMultipart(role="user", content=[item.content])
+         elif isinstance(item, PromptMessageMultipart):
+             # Keep the content but change role to user
+             return PromptMessageMultipart(role="user", content=item.content)
+
+     # Use the original implementation for other types
      messages = User(*content_items)
      return PromptMessageMultipart(role="user", content=[msg["content"] for msg in messages])
 
  @classmethod
- def assistant(cls, *content_items) -> PromptMessageMultipart:
+ def assistant(cls, *content_items: Union[str, Path, bytes, dict, MCPContentType, PromptMessage, PromptMessageMultipart]) -> PromptMessageMultipart:
      """
      Create an assistant PromptMessageMultipart with various content items.
 
      Args:
-         *content_items: Content items (strings, file paths, etc.)
+         *content_items: Content items in various formats:
+             - Strings: Converted to TextContent
+             - Path objects: Converted based on file type (image/text/binary)
+             - Bytes: Treated as image data
+             - Dicts with role/content: Content extracted
+             - TextContent: Used directly
+             - ImageContent: Used directly
+             - EmbeddedResource: Used directly
+             - PromptMessage: Content extracted
+             - PromptMessageMultipart: Content extracted with role changed to assistant
 
      Returns:
          A PromptMessageMultipart with assistant role and the specified content
      """
+     # Handle PromptMessage and PromptMessageMultipart directly
+     if len(content_items) == 1:
+         item = content_items[0]
+         if isinstance(item, PromptMessage):
+             return PromptMessageMultipart(role="assistant", content=[item.content])
+         elif isinstance(item, PromptMessageMultipart):
+             # Keep the content but change role to assistant
+             return PromptMessageMultipart(role="assistant", content=item.content)
+
+     # Use the original implementation for other types
      messages = Assistant(*content_items)
      return PromptMessageMultipart(
          role="assistant", content=[msg["content"] for msg in messages]
@@ -60,18 +102,38 @@
 
  @classmethod
  def message(
-     cls, *content_items, role: Literal["user", "assistant"] = "user"
+     cls, *content_items: Union[str, Path, bytes, dict, MCPContentType, PromptMessage, PromptMessageMultipart],
+     role: Literal["user", "assistant"] = "user"
  ) -> PromptMessageMultipart:
      """
      Create a PromptMessageMultipart with the specified role and content items.
 
      Args:
-         *content_items: Content items (strings, file paths, etc.)
+         *content_items: Content items in various formats:
+             - Strings: Converted to TextContent
+             - Path objects: Converted based on file type (image/text/binary)
+             - Bytes: Treated as image data
+             - Dicts with role/content: Content extracted
+             - TextContent: Used directly
+             - ImageContent: Used directly
+             - EmbeddedResource: Used directly
+             - PromptMessage: Content extracted
+             - PromptMessageMultipart: Content extracted with role changed as specified
          role: Role for the message (user or assistant)
 
      Returns:
          A PromptMessageMultipart with the specified role and content
      """
+     # Handle PromptMessage and PromptMessageMultipart directly
+     if len(content_items) == 1:
+         item = content_items[0]
+         if isinstance(item, PromptMessage):
+             return PromptMessageMultipart(role=role, content=[item.content])
+         elif isinstance(item, PromptMessageMultipart):
+             # Keep the content but change role as specified
+             return PromptMessageMultipart(role=role, content=item.content)
+
+     # Use the original implementation for other types
      messages = MCPPrompt(*content_items, role=role)
      return PromptMessageMultipart(
          role=messages[0]["role"] if messages else role,
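
The practical effect of the `Prompt` changes is that a single `PromptMessage` or `PromptMessageMultipart` argument is now unwrapped and re-labelled rather than stringified. A short sketch with made-up message text:

```python
# Sketch: Prompt.user()/.assistant() now convert MCP message types directly.
from mcp.types import PromptMessage, TextContent

from mcp_agent.core.prompt import Prompt

pm = PromptMessage(role="assistant", content=TextContent(type="text", text="hi"))

# The PromptMessage content is lifted into a multipart message with the new role
as_user = Prompt.user(pm)
assert as_user.role == "user"
assert as_user.content == [pm.content]

# A PromptMessageMultipart keeps its content but takes the requested role
flipped = Prompt.assistant(Prompt.user("one", "two"))
assert flipped.role == "assistant"
assert len(flipped.content) == 2
```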
mcp_agent/core/validation.py CHANGED
@@ -231,7 +231,7 @@ def get_dependencies_groups(
      dependencies[name].update(agent_data.get("parallel_agents", []))
  elif agent_type == AgentType.CHAIN.value:
      # Chain agents depend on the agents in their sequence
-     dependencies[name].update(agent_data.get("chain_agents", []))
+     dependencies[name].update(agent_data.get("sequence", []))
  elif agent_type == AgentType.ROUTER.value:
      # Router agents depend on the agents they route to
      dependencies[name].update(agent_data.get("router_agents", []))