fast-agent-mcp 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +37 -9
  2. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +53 -31
  3. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +5 -11
  5. mcp_agent/core/agent_app.py +125 -44
  6. mcp_agent/core/decorators.py +3 -2
  7. mcp_agent/core/enhanced_prompt.py +106 -20
  8. mcp_agent/core/factory.py +28 -66
  9. mcp_agent/core/fastagent.py +13 -3
  10. mcp_agent/core/mcp_content.py +222 -0
  11. mcp_agent/core/prompt.py +132 -0
  12. mcp_agent/core/proxies.py +41 -36
  13. mcp_agent/human_input/handler.py +4 -1
  14. mcp_agent/logging/transport.py +30 -3
  15. mcp_agent/mcp/mcp_aggregator.py +27 -22
  16. mcp_agent/mcp/mime_utils.py +69 -0
  17. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  18. mcp_agent/mcp/prompt_serialization.py +447 -0
  19. mcp_agent/mcp/prompts/__init__.py +0 -0
  20. mcp_agent/mcp/prompts/__main__.py +10 -0
  21. mcp_agent/mcp/prompts/prompt_server.py +508 -0
  22. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  23. mcp_agent/mcp/resource_utils.py +203 -0
  24. mcp_agent/resources/examples/internal/agent.py +1 -1
  25. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  26. mcp_agent/resources/examples/internal/sizer.py +0 -5
  27. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  28. mcp_agent/resources/examples/prompting/agent.py +23 -0
  29. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  30. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  31. mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
  32. mcp_agent/resources/examples/workflows/orchestrator.py +5 -4
  33. mcp_agent/resources/examples/workflows/router.py +0 -2
  34. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +57 -87
  35. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  36. mcp_agent/workflows/llm/augmented_llm.py +155 -141
  37. mcp_agent/workflows/llm/augmented_llm_anthropic.py +135 -281
  38. mcp_agent/workflows/llm/augmented_llm_openai.py +175 -337
  39. mcp_agent/workflows/llm/augmented_llm_passthrough.py +104 -0
  40. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  41. mcp_agent/workflows/llm/model_factory.py +25 -6
  42. mcp_agent/workflows/llm/openai_utils.py +65 -0
  43. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  44. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  45. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  46. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  47. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  48. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  49. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  50. mcp_agent/workflows/orchestrator/orchestrator.py +62 -153
  51. mcp_agent/workflows/router/router_llm.py +18 -24
  52. mcp_agent/core/server_validation.py +0 -44
  53. mcp_agent/core/simulator_registry.py +0 -22
  54. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  55. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
  56. {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
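The detailed diff shown below covers mcp_agent/workflows/llm/augmented_llm.py (item 36 above; its hunks sum to the +155/-141 recorded there). In summary: the ProviderToMCPConverter protocol is replaced by SamplingFormatConverter, the protocol gains a generate_prompt method built on the new PromptMessageMultipart type, apply_prompt_template is rewritten around a provider-specific hook, and PassthroughLLM is removed from this module.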
@@ -1,7 +1,6 @@
 from abc import abstractmethod
 
 from typing import (
-    Any,
     Generic,
     List,
     Optional,
@@ -9,18 +8,30 @@ from typing import (
     Type,
     TypeVar,
     TYPE_CHECKING,
-    Union,
 )
 
+from mcp import CreateMessageResult, SamplingMessage
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.workflows.llm.sampling_format_converter import (
+    SamplingFormatConverter,
+    MessageParamT,
+    MessageT,
+)
+
+# Forward reference for type annotations
+if TYPE_CHECKING:
+    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+    from mcp_agent.agents.agent import Agent
+    from mcp_agent.context import Context
+
+
 from pydantic import Field
 
 from mcp.types import (
     CallToolRequest,
     CallToolResult,
     CreateMessageRequestParams,
-    CreateMessageResult,
     ModelPreferences,
-    SamplingMessage,
     PromptMessage,
     TextContent,
     GetPromptResult,
@@ -34,22 +45,10 @@ from mcp_agent.workflows.llm.llm_selector import ModelSelector
 from mcp_agent.ui.console_display import ConsoleDisplay
 from rich.text import Text
 
-if TYPE_CHECKING:
-    from mcp_agent.agents.agent import Agent
-    from mcp_agent.context import Context
-
-MessageParamT = TypeVar("MessageParamT")
-"""A type representing an input message to an LLM."""
-
-MessageT = TypeVar("MessageT")
-"""A type representing an output message from an LLM."""
 
 ModelT = TypeVar("ModelT")
 """A type representing a structured output message from an LLM."""
 
-# TODO: saqadri - SamplingMessage is fairly limiting - consider extending
-MCPMessageParam = SamplingMessage
-MCPMessageResult = CreateMessageResult
 
 # TODO -- move this to a constant
 HUMAN_INPUT_TOOL_NAME = "__human_input__"
@@ -218,25 +217,10 @@ class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
 
-
-class ProviderToMCPConverter(Protocol, Generic[MessageParamT, MessageT]):
-    """Conversions between LLM provider and MCP types"""
-
-    @classmethod
-    def to_mcp_message_result(cls, result: MessageT) -> MCPMessageResult:
-        """Convert an LLM response to an MCP message result type."""
-
-    @classmethod
-    def from_mcp_message_result(cls, result: MCPMessageResult) -> MessageT:
-        """Convert an MCP message result to an LLM response type."""
-
-    @classmethod
-    def to_mcp_message_param(cls, param: MessageParamT) -> MCPMessageParam:
-        """Convert an LLM input to an MCP message (SamplingMessage) type."""
-
-    @classmethod
-    def from_mcp_message_param(cls, param: MCPMessageParam) -> MessageParamT:
-        """Convert an MCP message (SamplingMessage) to an LLM input type."""
+    async def generate_prompt(
+        self, prompt: PromptMessageMultipart, request_params: RequestParams | None
+    ) -> str:
+        """Request an LLM generation and return a string representation of the result"""
 
 
 class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
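AugmentedLLMProtocol now includes generate_prompt, which accepts a PromptMessageMultipart instead of a plain string. A minimal calling sketch, not part of the diff; the role/content constructor fields are assumptions, since PromptMessageMultipart is defined in the new prompt_message_multipart.py that this view does not expand:

    from mcp.types import TextContent

    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart


    async def ask(llm) -> str:
        # Assumed constructor: a role plus a list of standard MCP content parts.
        prompt = PromptMessageMultipart(
            role="user",
            content=[TextContent(type="text", text="Summarize the last tool result.")],
        )
        # Passing request_params=None falls back to the LLM's default parameters.
        return await llm.generate_prompt(prompt, request_params=None)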
@@ -259,7 +243,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         instruction: str | None = None,
         name: str | None = None,
         request_params: RequestParams | None = None,
-        type_converter: Type[ProviderToMCPConverter[MessageParamT, MessageT]] = None,
+        type_converter: Type[SamplingFormatConverter[MessageParamT, MessageT]] = None,
         context: Optional["Context"] = None,
         **kwargs,
     ):
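Converters are now typed against SamplingFormatConverter rather than ProviderToMCPConverter. A toy sketch of one, assuming the protocol declares the renamed classmethods that AugmentedLLM delegates to later in this diff (to_sampling_result, from_sampling_result, to_sampling_message, from_sampling_message, from_prompt_message); the real definition lives in the new sampling_format_converter.py, which this view does not expand:

    from mcp import CreateMessageResult, SamplingMessage
    from mcp.types import PromptMessage, TextContent

    from mcp_agent.workflows.llm.sampling_format_converter import SamplingFormatConverter


    class DictFormatConverter(SamplingFormatConverter[dict, dict]):
        """Hypothetical provider whose requests and responses are plain dicts."""

        @classmethod
        def to_sampling_result(cls, result: dict) -> CreateMessageResult:
            return CreateMessageResult(
                role="assistant",
                content=TextContent(type="text", text=result["text"]),
                model=result.get("model", "unknown"),
            )

        @classmethod
        def from_sampling_result(cls, result: CreateMessageResult) -> dict:
            return {"role": result.role, "text": getattr(result.content, "text", "")}

        @classmethod
        def to_sampling_message(cls, param: dict) -> SamplingMessage:
            return SamplingMessage(
                role=param["role"],
                content=TextContent(type="text", text=param["text"]),
            )

        @classmethod
        def from_sampling_message(cls, param: SamplingMessage) -> dict:
            return {"role": param.role, "text": getattr(param.content, "text", "")}

        @classmethod
        def from_prompt_message(cls, message: PromptMessage) -> dict:
            return {"role": message.role, "text": getattr(message.content, "text", "")}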
@@ -337,6 +321,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
 
+    # aysnc def generate2_str(self, prompt: PromptMessageMultipart, request_params: RequestParams | None = None) -> List[MessageT]:
+    #     """Request an LLM generation, which may run multiple iterations, and return the result"""
+    #     return None
+
     async def select_model(
         self, request_params: RequestParams | None = None
     ) -> str | None:
@@ -381,10 +369,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         merged.update(provided_params.model_dump(exclude_unset=True))
         final_params = RequestParams(**merged)
 
-        # self.logger.debug(
-        #     "Final merged params:", extra={"params": final_params.model_dump()}
-        # )
-
         return final_params
 
     def get_request_params(
@@ -411,24 +395,24 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
 
         return default_request_params
 
-    def to_mcp_message_result(self, result: MessageT) -> MCPMessageResult:
+    def to_mcp_message_result(self, result: MessageT) -> CreateMessageResult:
         """Convert an LLM response to an MCP message result type."""
-        return self.type_converter.to_mcp_message_result(result)
+        return self.type_converter.to_sampling_result(result)
 
-    def from_mcp_message_result(self, result: MCPMessageResult) -> MessageT:
+    def from_mcp_message_result(self, result: CreateMessageResult) -> MessageT:
         """Convert an MCP message result to an LLM response type."""
-        return self.type_converter.from_mcp_message_result(result)
+        return self.type_converter.from_sampling_result(result)
 
-    def to_mcp_message_param(self, param: MessageParamT) -> MCPMessageParam:
+    def to_mcp_message_param(self, param: MessageParamT) -> SamplingMessage:
         """Convert an LLM input to an MCP message (SamplingMessage) type."""
-        return self.type_converter.to_mcp_message_param(param)
+        return self.type_converter.to_sampling_message(param)
 
-    def from_mcp_message_param(self, param: MCPMessageParam) -> MessageParamT:
+    def from_mcp_message_param(self, param: SamplingMessage) -> MessageParamT:
         """Convert an MCP message (SamplingMessage) to an LLM input type."""
-        return self.type_converter.from_mcp_message_param(param)
+        return self.type_converter.from_sampling_message(param)
 
     def from_mcp_prompt_message(self, message: PromptMessage) -> MessageParamT:
-        return self.type_converter.from_mcp_prompt_message(message)
+        return self.type_converter.from_prompt_message(message)
 
     @classmethod
     def convert_message_to_message_param(
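The public wrapper names on AugmentedLLM are unchanged; only the delegation targets are renamed, so existing callers keep working. For example, with the hypothetical DictFormatConverter sketched earlier:

    # llm was constructed with type_converter=DictFormatConverter
    sampling_msg = llm.to_mcp_message_param({"role": "user", "text": "hello"})
    assert llm.from_mcp_message_param(sampling_msg) == {"role": "user", "text": "hello"}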
@@ -567,7 +551,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
                 text_parts.append(part.text)
             if text_parts:
                 return "\n".join(text_parts)
-
+        
         # For objects with content attribute
         if hasattr(message, "content"):
             content = message.content
@@ -575,7 +559,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
             return content
         elif hasattr(content, "text"):
             return content.text
-
+        
         # Default fallback
         return str(message)
 
@@ -588,7 +572,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         result = self.message_param_str(message)
         if result != str(message):
             return result
-
+        
         # Additional handling for output-specific formats
         if hasattr(message, "content"):
             content = message.content
@@ -600,7 +584,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
                     text_parts.append(block.text)
             if text_parts:
                 return "\n".join(text_parts)
-
+        
         # Default fallback
         return str(message)
 
@@ -650,7 +634,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
     ):
         """
         Display information about a loaded prompt template.
-
+        
         Args:
             prompt_name: The name of the prompt
             description: Optional description of the prompt
@@ -679,13 +663,13 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
             prompt_name: The name of the prompt being applied
 
         Returns:
-            String representation of the assistant's response if generated, 
+            String representation of the assistant's response if generated,
             or the last assistant message in the prompt
         """
-        prompt_messages: List[PromptMessage] = prompt_result.messages
-
+        from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
         # Check if we have any messages
-        if not prompt_messages:
+        if not prompt_result.messages:
             return "Prompt contains no messages"
 
         # Extract arguments if they were stored in the result
@@ -695,105 +679,135 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
             await self.show_prompt_loaded(
                 prompt_name=prompt_name,
                 description=prompt_result.description,
-                message_count=len(prompt_messages),
+                message_count=len(prompt_result.messages),
                 arguments=arguments,
             )
-
+        
+        # Convert to PromptMessageMultipart objects
+        multipart_messages = PromptMessageMultipart.parse_get_prompt_result(
+            prompt_result
+        )
+
+        # Delegate to the provider-specific implementation
+        return await self._apply_prompt_template_provider_specific(multipart_messages)
+
+    async def _apply_prompt_template_provider_specific(
+        self, multipart_messages: List["PromptMessageMultipart"]
+    ) -> str:
+        """
+        Provider-specific implementation of apply_prompt_template.
+        This default implementation handles basic text content for any LLM type.
+        Provider-specific subclasses should override this method to handle
+        multimodal content appropriately.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
         # Check the last message role
-        last_message = prompt_messages[-1]
-
+        last_message = multipart_messages[-1]
+        
         if last_message.role == "user":
             # For user messages: Add all previous messages to history, then generate response to the last one
-            self.logger.debug("Last message in prompt is from user, generating assistant response")
-
+            self.logger.debug(
+                "Last message in prompt is from user, generating assistant response"
+            )
+
             # Add all but the last message to history
-            if len(prompt_messages) > 1:
-                previous_messages = prompt_messages[:-1]
+            if len(multipart_messages) > 1:
+                previous_messages = multipart_messages[:-1]
                 converted = []
+
+                # Fallback generic method for all LLM types
                 for msg in previous_messages:
-                    converted.append(self.type_converter.from_mcp_prompt_message(msg))
+                    # Convert each PromptMessageMultipart to individual PromptMessages
+                    prompt_messages = msg.to_prompt_messages()
+                    for prompt_msg in prompt_messages:
+                        converted.append(
+                            self.type_converter.from_prompt_message(prompt_msg)
+                        )
+
                 self.history.extend(converted, is_prompt=True)
-
-            # Extract the user's question and generate a response
-            user_content = last_message.content
-            user_text = user_content.text if hasattr(user_content, "text") else str(user_content)
-
+
+            # For generic LLMs, extract text and describe non-text content
+            user_text_parts = []
+            for content in last_message.content:
+                if content.type == "text":
+                    user_text_parts.append(content.text)
+                elif content.type == "resource" and hasattr(content.resource, "text"):
+                    user_text_parts.append(content.resource.text)
+                elif content.type == "image":
+                    # Add a placeholder for images
+                    mime_type = getattr(content, "mimeType", "image/unknown")
+                    user_text_parts.append(f"[Image: {mime_type}]")
+
+            user_text = "\n".join(user_text_parts) if user_text_parts else ""
+            if not user_text:
+                # Fallback to original method if we couldn't extract text
+                user_text = str(last_message.content)
+
             return await self.generate_str(user_text)
         else:
             # For assistant messages: Add all messages to history and return the last one
-            self.logger.debug("Last message in prompt is from assistant, returning it directly")
-
+            self.logger.debug(
+                "Last message in prompt is from assistant, returning it directly"
+            )
+
             # Convert and add all messages to history
             converted = []
-            for msg in prompt_messages:
-                converted.append(self.type_converter.from_mcp_prompt_message(msg))
-            self.history.extend(converted, is_prompt=True)
-
-            # Return the assistant's message
-            content = last_message.content
-            return content.text if hasattr(content, "text") else str(content)
-
-
 
-class PassthroughLLM(AugmentedLLM):
-    """
-    A specialized LLM implementation that simply passes through input messages without modification.
-
-    This is useful for cases where you need an object with the AugmentedLLM interface
-    but want to preserve the original message without any processing, such as in a
-    parallel workflow where no fan-in aggregation is needed.
-    """
-
-    def __init__(self, name: str = "Passthrough", context=None, **kwargs):
-        super().__init__(name=name, context=context, **kwargs)
+            # Fallback to the original method for all LLM types
+            for msg in multipart_messages:
+                # Convert each PromptMessageMultipart to individual PromptMessages
+                prompt_messages = msg.to_prompt_messages()
+                for prompt_msg in prompt_messages:
+                    converted.append(
+                        self.type_converter.from_prompt_message(prompt_msg)
+                    )
 
-    async def generate(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        request_params: Optional[RequestParams] = None,
-    ) -> Union[List[MessageT], Any]:
-        """Simply return the input message as is."""
-        # Return in the format expected by the caller
-        return [message] if isinstance(message, list) else message
+            self.history.extend(converted, is_prompt=True)
 
-    async def generate_str(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        request_params: Optional[RequestParams] = None,
-    ) -> str:
-        """Return the input message as a string."""
-        self.show_user_message(message, model="fastagent-passthrough", chat_turn=0)
-        await self.show_assistant_message(message, title="ASSISTANT/PASSTHROUGH")
+            # Return the assistant's message with proper handling of different content types
+            assistant_text_parts = []
+            has_non_text_content = False
+
+            for content in last_message.content:
+                if content.type == "text":
+                    assistant_text_parts.append(content.text)
+                elif content.type == "resource" and hasattr(content.resource, "text"):
+                    # Add resource text with metadata
+                    mime_type = getattr(content.resource, "mimeType", "text/plain")
+                    uri = getattr(content.resource, "uri", "")
+                    if uri:
+                        assistant_text_parts.append(
+                            f"[Resource: {uri}, Type: {mime_type}]\n{content.resource.text}"
+                        )
+                    else:
+                        assistant_text_parts.append(
+                            f"[Resource Type: {mime_type}]\n{content.resource.text}"
+                        )
+                elif content.type == "image":
+                    # Note the presence of images
+                    mime_type = getattr(content, "mimeType", "image/unknown")
+                    assistant_text_parts.append(f"[Image: {mime_type}]")
+                    has_non_text_content = True
+                else:
+                    # Other content types
+                    assistant_text_parts.append(f"[Content of type: {content.type}]")
+                    has_non_text_content = True
+
+            # Join all parts with double newlines for better readability
+            result = (
+                "\n\n".join(assistant_text_parts)
+                if assistant_text_parts
+                else str(last_message.content)
+            )
 
-        return str(message)
+            # Add a note if non-text content was present
+            if has_non_text_content:
+                result += "\n\n[Note: This message contained non-text content that may not be fully represented in text format]"
 
-    async def generate_structured(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        response_model: Type[ModelT],
-        request_params: Optional[RequestParams] = None,
-    ) -> ModelT:
-        """
-        Return the input message as the requested model type.
-        This is a best-effort implementation - it may fail if the
-        message cannot be converted to the requested model.
-        """
-        if isinstance(message, response_model):
-            return message
-        elif isinstance(message, dict):
-            return response_model(**message)
-        elif isinstance(message, str):
-            try:
-                # Try to parse as JSON if it's a string
-                import json
-
-                data = json.loads(message)
-                return response_model(**data)
-            except:  # noqa: E722
-                raise ValueError(
-                    f"Cannot convert message of type {type(message)} to {response_model}"
-                )
-        else:
-            raise ValueError(
-                f"Cannot convert message of type {type(message)} to {response_model}"
-            )
+            return result
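PassthroughLLM and its best-effort generate_structured are deleted here rather than modified; judging by the file list, they presumably move to the new mcp_agent/workflows/llm/augmented_llm_passthrough.py. The net effect of the hunk is that apply_prompt_template now parses the GetPromptResult into PromptMessageMultipart objects and hands them to _apply_prompt_template_provider_specific, whose default implementation flattens text, embedded-resource, and image parts into a single string. A rough usage sketch, assuming apply_prompt_template takes the result plus the prompt name as its docstring indicates:

    from mcp.types import GetPromptResult


    async def apply(llm, prompt_result: GetPromptResult) -> str:
        # 1. Announces the loaded prompt via show_prompt_loaded.
        # 2. Converts messages with PromptMessageMultipart.parse_get_prompt_result.
        # 3. Delegates to _apply_prompt_template_provider_specific, which either
        #    generates a reply (last message from user) or returns the last
        #    assistant message rendered as text.
        return await llm.apply_prompt_template(prompt_result, "example-prompt")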