fast-agent-mcp 0.2.3__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.4.dist-info}/METADATA +12 -8
  2. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.4.dist-info}/RECORD +33 -30
  3. mcp_agent/__init__.py +2 -2
  4. mcp_agent/agents/agent.py +5 -0
  5. mcp_agent/agents/base_agent.py +158 -42
  6. mcp_agent/agents/workflow/chain_agent.py +9 -13
  7. mcp_agent/agents/workflow/evaluator_optimizer.py +3 -3
  8. mcp_agent/agents/workflow/orchestrator_agent.py +9 -7
  9. mcp_agent/agents/workflow/parallel_agent.py +2 -2
  10. mcp_agent/agents/workflow/router_agent.py +7 -5
  11. mcp_agent/core/{direct_agent_app.py → agent_app.py} +115 -15
  12. mcp_agent/core/direct_factory.py +12 -12
  13. mcp_agent/core/fastagent.py +17 -4
  14. mcp_agent/core/mcp_content.py +38 -5
  15. mcp_agent/core/prompt.py +70 -8
  16. mcp_agent/core/validation.py +1 -1
  17. mcp_agent/llm/augmented_llm.py +38 -8
  18. mcp_agent/llm/model_factory.py +17 -27
  19. mcp_agent/llm/providers/augmented_llm_openai.py +3 -3
  20. mcp_agent/llm/providers/multipart_converter_anthropic.py +8 -8
  21. mcp_agent/llm/providers/multipart_converter_openai.py +9 -9
  22. mcp_agent/mcp/helpers/__init__.py +3 -0
  23. mcp_agent/mcp/helpers/content_helpers.py +116 -0
  24. mcp_agent/mcp/interfaces.py +39 -16
  25. mcp_agent/mcp/mcp_aggregator.py +117 -13
  26. mcp_agent/mcp/prompt_message_multipart.py +29 -22
  27. mcp_agent/mcp/prompt_render.py +18 -15
  28. mcp_agent/mcp/prompts/prompt_helpers.py +22 -112
  29. mcp_agent/mcp_server/agent_server.py +2 -2
  30. mcp_agent/resources/examples/internal/history_transfer.py +35 -0
  31. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.4.dist-info}/WHEEL +0 -0
  32. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.4.dist-info}/entry_points.txt +0 -0
  33. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.4.dist-info}/licenses/LICENSE +0 -0

mcp_agent/llm/augmented_llm.py
@@ -5,6 +5,7 @@ from typing import (
     Generic,
     List,
     Optional,
+    Tuple,
     Type,
     TypeVar,
     cast,
@@ -76,12 +77,24 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             ProviderFormatConverter[MessageParamT, MessageT]
         ] = BasicFormatConverter,
         context: Optional["Context"] = None,
+        model: Optional[str] = None,
         **kwargs: dict[str, Any],
     ) -> None:
         """
         Initialize the LLM with a list of server names and an instruction.
         If a name is provided, it will be used to identify the LLM.
         If an agent is provided, all other properties are optional
+
+        Args:
+            agent: Optional Agent that owns this LLM
+            server_names: List of MCP server names to connect to
+            instruction: System prompt for the LLM
+            name: Optional name identifier for the LLM
+            request_params: RequestParams to configure LLM behavior
+            type_converter: Provider-specific format converter class
+            context: Application context
+            model: Optional model name override
+            **kwargs: Additional provider-specific parameters
         """
         # Extract request_params before super() call
         self._init_request_params = request_params
@@ -95,7 +108,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # memory contains provider specific API types.
         self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()

-        self.message_history: List[PromptMessageMultipart] = []
+        self._message_history: List[PromptMessageMultipart] = []

         # Initialize the display component
         self.display = ConsoleDisplay(config=self.context.config)
@@ -103,6 +116,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # Initialize default parameters
         self.default_request_params = self._initialize_default_params(kwargs)

+        # Apply model override if provided
+        if model:
+            self.default_request_params.model = model
+
         # Merge with provided params if any
        if self._init_request_params:
            self.default_request_params = self._merge_request_params(
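
The override ordering above means a model passed to the constructor beats the model baked into the defaults. A usage sketch (not part of the diff; AnthropicAugmentedLLM is one of the provider classes, while my_agent and the "haiku" alias are illustrative):

    # `model=` at construction time overrides default_request_params.model
    llm = AnthropicAugmentedLLM(
        agent=my_agent,   # assumed pre-existing Agent instance
        model="haiku",    # placeholder model string
    )
    assert llm.default_request_params.model == "haiku"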
@@ -127,17 +144,17 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         prompt: List[PromptMessageMultipart],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> ModelT | None:
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
         """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
         try:
             result: PromptMessageMultipart = await self.generate(prompt, request_params)
             json_data = from_json(result.first_text(), allow_partial=True)
             validated_model = model.model_validate(json_data)
-            return cast("ModelT", validated_model)
+            return cast("ModelT", validated_model), Prompt.assistant(json_data)
         except Exception as e:
             logger = get_logger(__name__)
             logger.error(f"Failed to parse structured response: {str(e)}")
-            return None
+            return None, Prompt.assistant(f"Failed to parse structured response: {str(e)}")

     async def generate(
         self,
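
structured() now returns a (model, assistant_message) tuple instead of a bare model, so a parse failure still yields a message that can be logged or kept in history. A calling sketch (not part of the diff; WeatherReport and the prompt text are illustrative, and Prompt.user is assumed to exist alongside the Prompt.assistant used above):

    from typing import Optional

    from pydantic import BaseModel

    from mcp_agent.core.prompt import Prompt

    class WeatherReport(BaseModel):  # hypothetical schema
        city: str
        temperature_c: float

    async def fetch_report(llm) -> Optional[WeatherReport]:
        report, assistant_msg = await llm.structured(
            [Prompt.user("Report the weather in Paris as JSON")],
            WeatherReport,
        )
        if report is None:
            # the assistant message carries the parse-failure text
            print(assistant_msg.first_text())
        return report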
@@ -158,7 +175,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             )
             return Prompt.assistant(f"History saved to {filename}")

-        self.message_history.extend(multipart_messages)
+        self._message_history.extend(multipart_messages)

         if multipart_messages[-1].role == "user":
             self.show_user_message(
@@ -171,12 +188,12 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             multipart_messages, request_params
         )

-        self.message_history.append(assistant_response)
+        self._message_history.append(assistant_response)
         return assistant_response

     def chat_turn(self) -> int:
         """Return the current chat turn number"""
-        return 1 + sum(1 for message in self.message_history if message.role == "assistant")
+        return 1 + sum(1 for message in self._message_history if message.role == "assistant")

     def _merge_request_params(
         self, default_params: RequestParams, provided_params: RequestParams
@@ -422,7 +439,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         """
         # Convert to delimited format
         delimited_content = multipart_messages_to_delimited_format(
-            self.message_history,
+            self._message_history,
         )

         # Write to file
@@ -448,3 +465,16 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             String representation of the assistant's response if generated,
             or the last assistant message in the prompt
         """
+
+    @property
+    def message_history(self) -> List[PromptMessageMultipart]:
+        """
+        Return the agent's message history as PromptMessageMultipart objects.
+
+        This history can be used to transfer state between agents or for
+        analysis and debugging purposes.
+
+        Returns:
+            List of PromptMessageMultipart objects representing the conversation history
+        """
+        return self._message_history
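
This property is what powers the new resources/examples/internal/history_transfer.py file listed above: one agent's conversation can be replayed into another. A sketch of the pattern (not part of the diff; the agent names are illustrative):

    async def transfer(haiku, openai) -> None:
        await haiku.send("Write a haiku about programming")

        # Full conversation as PromptMessageMultipart objects
        history = haiku.message_history

        # Replay it into a second agent to seed its context
        await openai.generate(history)
        await openai.send("Now translate that haiku to French")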

mcp_agent/llm/model_factory.py
@@ -186,38 +186,28 @@ class ModelFactory:
         else:
             llm_class = cls.PROVIDER_CLASSES[config.provider]

-        # Create a factory function matching the attach_llm protocol
-        def factory(agent: Agent, **kwargs) -> LLMClass:
-            # Create merged params with parsed model name
-            factory_params = request_params.model_copy() if request_params else RequestParams()
-            factory_params.model = config.model_name  # Use the parsed model name, not the alias
-
-            # Merge with any provided default_request_params
-            if "default_request_params" in kwargs and kwargs["default_request_params"]:
-                params_dict = factory_params.model_dump()
-                params_dict.update(kwargs["default_request_params"].model_dump(exclude_unset=True))
-                factory_params = RequestParams(**params_dict)
-                factory_params.model = (
-                    config.model_name
-                )  # Ensure parsed model name isn't overwritten
-
-            # Forward all keyword arguments to LLM constructor
+        # Create a factory function matching the updated attach_llm protocol
+        def factory(
+            agent: Agent,
+            request_params: Optional[RequestParams] = None,
+            **kwargs
+        ) -> AugmentedLLMProtocol:
+            # Create base params with parsed model name
+            base_params = RequestParams()
+            base_params.model = config.model_name  # Use the parsed model name, not the alias
+
+            # Add reasoning effort if available
+            if config.reasoning_effort:
+                kwargs["reasoning_effort"] = config.reasoning_effort.value
+
+            # Forward all arguments to LLM constructor
             llm_args = {
                 "agent": agent,
                 "model": config.model_name,
-                "request_params": factory_params,
-                "name": kwargs.get("name"),
+                "request_params": request_params,
+                **kwargs
             }

-            # Add reasoning effort if available
-            if config.reasoning_effort:
-                llm_args["reasoning_effort"] = config.reasoning_effort.value
-
-            # Forward all other kwargs (including verb)
-            for key, value in kwargs.items():
-                if key not in ["agent", "default_request_params", "name"]:
-                    llm_args[key] = value
-
             llm: AugmentedLLMProtocol = llm_class(**llm_args)
             return llm
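The reshaped factory takes request_params as an explicit argument rather than fishing a default_request_params entry out of kwargs. A consuming sketch (not part of the diff; ModelFactory.create_factory and my_agent are assumptions based on the surrounding codebase):

    factory = ModelFactory.create_factory("haiku")  # alias resolved internally
    llm = factory(
        agent=my_agent,                  # assumed pre-existing Agent
        request_params=RequestParams(),  # now forwarded verbatim
    )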

mcp_agent/llm/providers/augmented_llm_openai.py
@@ -1,5 +1,5 @@
 import os
-from typing import List, Type
+from typing import List, Tuple, Type

 from mcp.types import (
     CallToolRequest,
@@ -377,7 +377,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         prompt: List[PromptMessageMultipart],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> ModelT | None:
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
         """
         Apply the prompt and return the result as a Pydantic model.

@@ -432,7 +432,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion

             parsed_result = response[0].choices[0].message
             logger.debug("Successfully used OpenAI beta parse feature for structured output")
-            return parsed_result.parsed
+            return parsed_result.parsed, Prompt.assistant(parsed_result.content)

         except (ImportError, AttributeError, NotImplementedError) as e:
             # Beta feature not available, log and continue to fallback

mcp_agent/llm/providers/multipart_converter_anthropic.py
@@ -24,13 +24,7 @@ from mcp.types import (
 )

 from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.mime_utils import (
-    guess_mime_type,
-    is_image_mime_type,
-    is_text_mime_type,
-)
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.mcp.prompts.prompt_helpers import (
+from mcp_agent.mcp.helpers.content_helpers import (
     get_image_data,
     get_resource_uri,
     get_text,
@@ -38,6 +32,12 @@ from mcp_agent.mcp.prompts.prompt_helpers import (
     is_resource_content,
     is_text_content,
 )
+from mcp_agent.mcp.mime_utils import (
+    guess_mime_type,
+    is_image_mime_type,
+    is_text_mime_type,
+)
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.mcp.resource_utils import extract_title_from_uri

 _logger = get_logger("multipart_converter_anthropic")
@@ -450,4 +450,4 @@ class AnthropicConverter:
         # Add separate blocks directly to the message
         content_blocks.extend(separate_blocks)

-        return MessageParam(role="user", content=content_blocks)
+        return MessageParam(role="user", content=content_blocks)

mcp_agent/llm/providers/multipart_converter_openai.py
@@ -9,14 +9,7 @@ from mcp.types import (
 )

 from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.mime_utils import (
-    guess_mime_type,
-    is_image_mime_type,
-    is_text_mime_type,
-)
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.mcp.prompts.prompt_helpers import (
-    MessageContent,
+from mcp_agent.mcp.helpers.content_helpers import (
     get_image_data,
     get_resource_uri,
     get_text,
@@ -24,6 +17,13 @@ from mcp_agent.mcp.prompts.prompt_helpers import (
     is_resource_content,
     is_text_content,
 )
+from mcp_agent.mcp.mime_utils import (
+    guess_mime_type,
+    is_image_mime_type,
+    is_text_mime_type,
+)
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.mcp.prompts.prompt_helpers import MessageContent
 from mcp_agent.mcp.resource_utils import extract_title_from_uri

 _logger = get_logger("multipart_converter_openai")
@@ -462,4 +462,4 @@ class OpenAIConverter:
         # Single message case (text-only)
         messages.append(converted)

-        return messages
+        return messages

mcp_agent/mcp/helpers/__init__.py
@@ -0,0 +1,3 @@
+"""
+Helper modules for working with MCP content.
+"""

mcp_agent/mcp/helpers/content_helpers.py
@@ -0,0 +1,116 @@
+"""
+Helper functions for working with content objects.
+
+These utilities simplify extracting content from content structures
+without repetitive type checking.
+"""
+
+from typing import Optional, Union
+
+from mcp.types import (
+    BlobResourceContents,
+    EmbeddedResource,
+    ImageContent,
+    TextContent,
+    TextResourceContents,
+)
+
+
+def get_text(content: Union[TextContent, ImageContent, EmbeddedResource]) -> Optional[str]:
+    """
+    Extract text content from a content object if available.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        The text content as a string or None if not a text content
+    """
+    if isinstance(content, TextContent):
+        return content.text
+
+    if isinstance(content, TextResourceContents):
+        return content.text
+
+    if isinstance(content, EmbeddedResource):
+        if isinstance(content.resource, TextResourceContents):
+            return content.resource.text
+
+    return None
+
+
+def get_image_data(content: Union[TextContent, ImageContent, EmbeddedResource]) -> Optional[str]:
+    """
+    Extract image data from a content object if available.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        The image data as a base64 string or None if not an image content
+    """
+    if isinstance(content, ImageContent):
+        return content.data
+
+    if isinstance(content, EmbeddedResource):
+        if isinstance(content.resource, BlobResourceContents):
+            # This assumes the blob might be an image, which isn't always true
+            # Consider checking the mimeType if needed
+            return content.resource.blob
+
+    return None
+
+
+def get_resource_uri(content: Union[TextContent, ImageContent, EmbeddedResource]) -> Optional[str]:
+    """
+    Extract resource URI from an EmbeddedResource if available.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        The resource URI as a string or None if not an embedded resource
+    """
+    if isinstance(content, EmbeddedResource):
+        return str(content.resource.uri)
+
+    return None
+
+
+def is_text_content(content: Union[TextContent, ImageContent, EmbeddedResource]) -> bool:
+    """
+    Check if the content is text content.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        True if the content is TextContent, False otherwise
+    """
+    return isinstance(content, TextContent) or isinstance(content, TextResourceContents)
+
+
+def is_image_content(content: Union[TextContent, ImageContent, EmbeddedResource]) -> bool:
+    """
+    Check if the content is image content.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        True if the content is ImageContent, False otherwise
+    """
+    return isinstance(content, ImageContent)
+
+
+def is_resource_content(content: Union[TextContent, ImageContent, EmbeddedResource]) -> bool:
+    """
+    Check if the content is an embedded resource.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        True if the content is EmbeddedResource, False otherwise
+    """
+    return isinstance(content, EmbeddedResource)
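
A usage sketch for the new helpers (the helper calls are verbatim from the module above; the TextContent construction follows the mcp.types models):

    from mcp.types import TextContent

    from mcp_agent.mcp.helpers.content_helpers import get_text, is_text_content

    content = TextContent(type="text", text="hello")
    assert is_text_content(content)      # also True for TextResourceContents
    assert get_text(content) == "hello"  # None for non-text content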

mcp_agent/mcp/interfaces.py
@@ -10,6 +10,7 @@ from typing import (
     Callable,
     Dict,
     List,
+    Mapping,
     Optional,
     Protocol,
     Tuple,
@@ -21,10 +22,10 @@ from typing import (

 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from deprecated import deprecated
-from mcp import ClientSession, GetPromptResult, ReadResourceResult
+from mcp import ClientSession
+from mcp.types import GetPromptResult, Prompt, PromptMessage, ReadResourceResult
 from pydantic import BaseModel

-from mcp_agent.core.prompt import Prompt
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
@@ -119,44 +120,66 @@ class AugmentedLLMProtocol(Protocol):
         """
         ...

+    @property
+    def message_history(self) -> List[PromptMessageMultipart]:
+        """
+        Return the LLM's message history as PromptMessageMultipart objects.
+
+        Returns:
+            List of PromptMessageMultipart objects representing the conversation history
+        """
+        ...
+

 class AgentProtocol(AugmentedLLMProtocol, Protocol):
     """Protocol defining the standard agent interface"""

     name: str

-    async def __call__(self, message: Union[str, PromptMessageMultipart] | None = None) -> str:
+    async def __call__(self, message: Union[str, PromptMessage, PromptMessageMultipart]) -> str:
         """Make the agent callable for sending messages directly."""
         ...

-    async def send(self, message: Union[str, PromptMessageMultipart]) -> str:
+    async def send(self, message: Union[str, PromptMessage, PromptMessageMultipart]) -> str:
         """Send a message to the agent and get a response"""
         ...

-    async def prompt(self, default_prompt: str = "") -> str:
-        """Start an interactive prompt session with the agent"""
-        ...
-
     async def apply_prompt(self, prompt_name: str, arguments: Dict[str, str] | None = None) -> str:
         """Apply an MCP prompt template by name"""
         ...

-    async def get_prompt(self, prompt_name: str) -> GetPromptResult: ...
+    async def get_prompt(
+        self,
+        prompt_name: str,
+        arguments: Dict[str, str] | None = None,
+        server_name: str | None = None,
+    ) -> GetPromptResult: ...

-    async def list_prompts(self, server_name: str | None) -> Dict[str, List[Prompt]]: ...
+    async def list_prompts(self, server_name: str | None = None) -> Mapping[str, List[Prompt]]: ...

-    async def get_resource(self, server_name: str, resource_uri: str) -> ReadResourceResult: ...
+    async def list_resources(self, server_name: str | None = None) -> Mapping[str, List[str]]: ...
+
+    async def get_resource(
+        self, resource_uri: str, server_name: str | None = None
+    ) -> ReadResourceResult:
+        """Get a resource from a specific server or search all servers"""
+        ...

     @deprecated
-    async def generate_str(self, message: str, request_params: RequestParams | None) -> str:
-        """Generate a response. Deprecated: please use send instead"""
+    async def generate_str(self, message: str, request_params: RequestParams | None = None) -> str:
+        """Generate a response. Deprecated: Use send(), generate() or structured() instead"""
+        ...
+
+    @deprecated
+    async def prompt(self, default_prompt: str = "") -> str:
+        """Start an interactive prompt session with the agent. Deprecated. Use agent_app.interactive() instead."""
         ...

     async def with_resource(
         self,
-        prompt_content: Union[str, PromptMessageMultipart],
-        server_name: str,
-        resource_name: str,
+        prompt_content: Union[str, PromptMessage, PromptMessageMultipart],
+        resource_uri: str,
+        server_name: str | None = None,
     ) -> str:
         """Send a message with an attached MCP resource"""
         ...
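
The resource methods flip their argument order (URI first, server second) and server_name becomes optional throughout. A calling sketch against the new protocol (not part of the diff; agent is any AgentProtocol implementation and the URI is illustrative):

    async def demo(agent) -> None:
        # server_name omitted: search all attached servers
        prompts = await agent.list_prompts()

        # URI first, server second (previously reversed)
        doc = await agent.get_resource("resource://docs/readme")

        # with_resource now takes a resource_uri, not a resource_name
        answer = await agent.with_resource(
            "Summarise the attached document",
            "resource://docs/readme",
        )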