fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/METADATA +27 -4
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/RECORD +51 -30
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +114 -8
- mcp_agent/context.py +0 -2
- mcp_agent/core/agent_app.py +89 -13
- mcp_agent/core/factory.py +14 -13
- mcp_agent/core/fastagent.py +15 -5
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +79 -36
- mcp_agent/logging/listeners.py +3 -6
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_agent_client_session.py +21 -145
- mcp_agent/mcp/mcp_aggregator.py +61 -12
- mcp_agent/mcp/mcp_connection_manager.py +0 -1
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +509 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +223 -0
- mcp_agent/mcp/stdio.py +23 -15
- mcp_agent/mcp_server_registry.py +5 -2
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/resources/examples/workflows/orchestrator.py +3 -3
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +139 -66
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
- mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +99 -1
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +20 -3
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/licenses/LICENSE +0 -0
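The bulk of this release is new multipart-prompt plumbing: `prompt_message_multipart.py`, `prompt_serialization.py`, the `mcp_agent/mcp/prompts/` server package, and the per-provider multipart and sampling converters under `workflows/llm/providers/`. The hunks below are the detailed diff for `mcp_agent/workflows/llm/augmented_llm.py` (+139 -66), where the shared `AugmentedLLM` base is rewired around the new message type. For orientation, a minimal sketch of that type as it is used in this diff — the constructor shape is an assumption modeled on `mcp.types.PromptMessage`, not taken from the package docs:

```python
# Sketch only: PromptMessageMultipart as inferred from this diff — a role plus
# a list of content parts, expandable back into single-content PromptMessages.
from mcp.types import TextContent
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

multipart = PromptMessageMultipart(
    role="user",
    content=[
        TextContent(type="text", text="Summarize the attached report."),
        # image or embedded-resource parts would sit alongside the text part
    ],
)

# The new history code in this diff expands multipart messages like this:
for prompt_msg in multipart.to_prompt_messages():
    print(prompt_msg.role, prompt_msg.content.type)
```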
```diff
@@ -10,15 +10,28 @@ from typing import (
     TYPE_CHECKING,
 )
 
+from mcp import CreateMessageResult, SamplingMessage
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.workflows.llm.sampling_format_converter import (
+    SamplingFormatConverter,
+    MessageParamT,
+    MessageT,
+)
+
+# Forward reference for type annotations
+if TYPE_CHECKING:
+    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+    from mcp_agent.agents.agent import Agent
+    from mcp_agent.context import Context
+
+
 from pydantic import Field
 
 from mcp.types import (
     CallToolRequest,
     CallToolResult,
     CreateMessageRequestParams,
-    CreateMessageResult,
     ModelPreferences,
-    SamplingMessage,
     PromptMessage,
     TextContent,
     GetPromptResult,
```
```diff
@@ -32,22 +45,10 @@ from mcp_agent.workflows.llm.llm_selector import ModelSelector
 from mcp_agent.ui.console_display import ConsoleDisplay
 from rich.text import Text
 
-if TYPE_CHECKING:
-    from mcp_agent.agents.agent import Agent
-    from mcp_agent.context import Context
-
-MessageParamT = TypeVar("MessageParamT")
-"""A type representing an input message to an LLM."""
-
-MessageT = TypeVar("MessageT")
-"""A type representing an output message from an LLM."""
 
 ModelT = TypeVar("ModelT")
 """A type representing a structured output message from an LLM."""
 
-# TODO: saqadri - SamplingMessage is fairly limiting - consider extending
-MCPMessageParam = SamplingMessage
-MCPMessageResult = CreateMessageResult
 
 # TODO -- move this to a constant
 HUMAN_INPUT_TOOL_NAME = "__human_input__"
```
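`MessageParamT` and `MessageT` (along with the `MCPMessageParam`/`MCPMessageResult` aliases) leave this module; the two TypeVars are now imported from the new converter module, per the import block added in the first hunk. Downstream code that imported them from `augmented_llm` would presumably switch to:

```python
# Assumed new import path, taken from the import block added above
from mcp_agent.workflows.llm.sampling_format_converter import MessageParamT, MessageT
```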
```diff
@@ -216,25 +217,10 @@ class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
 
-
-
-
-
-    @classmethod
-    def to_mcp_message_result(cls, result: MessageT) -> MCPMessageResult:
-        """Convert an LLM response to an MCP message result type."""
-
-    @classmethod
-    def from_mcp_message_result(cls, result: MCPMessageResult) -> MessageT:
-        """Convert an MCP message result to an LLM response type."""
-
-    @classmethod
-    def to_mcp_message_param(cls, param: MessageParamT) -> MCPMessageParam:
-        """Convert an LLM input to an MCP message (SamplingMessage) type."""
-
-    @classmethod
-    def from_mcp_message_param(cls, param: MCPMessageParam) -> MessageParamT:
-        """Convert an MCP message (SamplingMessage) to an LLM input type."""
+    async def generate_prompt(
+        self, prompt: PromptMessageMultipart, request_params: RequestParams | None
+    ) -> str:
+        """Request an LLM generation and return a string representation of the result"""
 
 
 class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
```
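The four MCP conversion classmethods drop out of the protocol (they survive as instance methods on `AugmentedLLM`, reworked in a later hunk) and a single `generate_prompt` entry point takes their place. A hedged usage sketch, assuming `llm` is any implementation of the protocol:

```python
# Callers can now hand a multipart prompt straight to the LLM and get text back.
response: str = await llm.generate_prompt(multipart, request_params=None)
```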
```diff
@@ -257,7 +243,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
         instruction: str | None = None,
         name: str | None = None,
         request_params: RequestParams | None = None,
-        type_converter: Type[
+        type_converter: Type[SamplingFormatConverter[MessageParamT, MessageT]] = None,
         context: Optional["Context"] = None,
         **kwargs,
     ):
```
```diff
@@ -335,6 +321,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
 
+    # aysnc def generate2_str(self, prompt: PromptMessageMultipart, request_params: RequestParams | None = None) -> List[MessageT]:
+    #     """Request an LLM generation, which may run multiple iterations, and return the result"""
+    #     return None
+
 
     async def select_model(
         self, request_params: RequestParams | None = None
     ) -> str | None:
```
```diff
@@ -379,10 +369,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
         merged.update(provided_params.model_dump(exclude_unset=True))
         final_params = RequestParams(**merged)
 
-        # self.logger.debug(
-        #     "Final merged params:", extra={"params": final_params.model_dump()}
-        # )
-
         return final_params
 
     def get_request_params(
```
```diff
@@ -409,24 +395,24 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
 
         return default_request_params
 
-    def to_mcp_message_result(self, result: MessageT) -> MCPMessageResult:
+    def to_mcp_message_result(self, result: MessageT) -> CreateMessageResult:
         """Convert an LLM response to an MCP message result type."""
-        return self.type_converter.
+        return self.type_converter.to_sampling_result(result)
 
-    def from_mcp_message_result(self, result: MCPMessageResult) -> MessageT:
+    def from_mcp_message_result(self, result: CreateMessageResult) -> MessageT:
         """Convert an MCP message result to an LLM response type."""
-        return self.type_converter.
+        return self.type_converter.from_sampling_result(result)
 
-    def to_mcp_message_param(self, param: MessageParamT) -> MCPMessageParam:
+    def to_mcp_message_param(self, param: MessageParamT) -> SamplingMessage:
         """Convert an LLM input to an MCP message (SamplingMessage) type."""
-        return self.type_converter.
+        return self.type_converter.to_sampling_message(param)
 
-    def from_mcp_message_param(self, param: MCPMessageParam) -> MessageParamT:
+    def from_mcp_message_param(self, param: SamplingMessage) -> MessageParamT:
         """Convert an MCP message (SamplingMessage) to an LLM input type."""
-        return self.type_converter.
+        return self.type_converter.from_sampling_message(param)
 
     def from_mcp_prompt_message(self, message: PromptMessage) -> MessageParamT:
-        return self.type_converter.
+        return self.type_converter.from_prompt_message(message)
 
     @classmethod
     def convert_message_to_message_param(
```
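The MCP conversion helpers now delegate to the renamed sampling-converter methods: `to_sampling_result`, `from_sampling_result`, `to_sampling_message`, `from_sampling_message`, and `from_prompt_message`. A minimal converter satisfying these call sites might look like the following — the base-class interface is inferred from this hunk (`type_converter` is a `Type[...]`, so the methods are invoked on the class), and the toy `dict` message types are purely illustrative:

```python
from mcp import CreateMessageResult, SamplingMessage
from mcp.types import PromptMessage, TextContent
from mcp_agent.workflows.llm.sampling_format_converter import SamplingFormatConverter


class DictFormatConverter(SamplingFormatConverter[dict, dict]):
    """Toy converter where both the param and result types are plain dicts."""

    @classmethod
    def to_sampling_result(cls, result: dict) -> CreateMessageResult:
        # Wrap a provider response dict in an MCP sampling result.
        return CreateMessageResult(
            role="assistant",
            content=TextContent(type="text", text=result["text"]),
            model=result.get("model", "unknown"),
        )

    @classmethod
    def from_sampling_result(cls, result: CreateMessageResult) -> dict:
        return {"role": result.role, "text": getattr(result.content, "text", "")}

    @classmethod
    def to_sampling_message(cls, param: dict) -> SamplingMessage:
        return SamplingMessage(
            role=param["role"],
            content=TextContent(type="text", text=param["text"]),
        )

    @classmethod
    def from_sampling_message(cls, param: SamplingMessage) -> dict:
        return {"role": param.role, "text": getattr(param.content, "text", "")}

    @classmethod
    def from_prompt_message(cls, message: PromptMessage) -> dict:
        return {"role": message.role, "text": getattr(message.content, "text", "")}
```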
```diff
@@ -680,10 +666,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
             String representation of the assistant's response if generated,
             or the last assistant message in the prompt
         """
-
+        from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
         # Check if we have any messages
-        if not
+        if not prompt_result.messages:
             return "Prompt contains no messages"
 
         # Extract arguments if they were stored in the result
```
```diff
@@ -693,12 +679,36 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
             await self.show_prompt_loaded(
                 prompt_name=prompt_name,
                 description=prompt_result.description,
-                message_count=len(
+                message_count=len(prompt_result.messages),
                 arguments=arguments,
             )
 
+        # Convert to PromptMessageMultipart objects
+        multipart_messages = PromptMessageMultipart.parse_get_prompt_result(
+            prompt_result
+        )
+
+        # Delegate to the provider-specific implementation
+        return await self._apply_prompt_template_provider_specific(multipart_messages)
+
+    async def _apply_prompt_template_provider_specific(
+        self, multipart_messages: List["PromptMessageMultipart"]
+    ) -> str:
+        """
+        Provider-specific implementation of apply_prompt_template.
+        This default implementation handles basic text content for any LLM type.
+        Provider-specific subclasses should override this method to handle
+        multimodal content appropriately.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
         # Check the last message role
-        last_message =
+        last_message = multipart_messages[-1]
 
         if last_message.role == "user":
             # For user messages: Add all previous messages to history, then generate response to the last one
```
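`apply_prompt_template` now normalizes the `GetPromptResult` into multipart messages and hands off to an overridable hook. A hedged sketch of how a provider subclass might plug in — the class name and body are illustrative, not from the package:

```python
from typing import List

from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM


class MyProviderAugmentedLLM(AugmentedLLM[dict, dict]):
    async def _apply_prompt_template_provider_specific(
        self, multipart_messages: List["PromptMessageMultipart"]
    ) -> str:
        # A multimodal provider would forward image and resource parts in its
        # native wire format here instead of the text-placeholder fallback.
        last = multipart_messages[-1]
        if last.role == "user":
            # ... build provider-native content, then generate as usual ...
            return await self.generate_str(str(last.content))
        return str(last.content)
```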
```diff
@@ -707,20 +717,37 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
             )
 
             # Add all but the last message to history
-            if len(
-                previous_messages =
+            if len(multipart_messages) > 1:
+                previous_messages = multipart_messages[:-1]
                 converted = []
+
+                # Fallback generic method for all LLM types
                 for msg in previous_messages:
-
+                    # Convert each PromptMessageMultipart to individual PromptMessages
+                    prompt_messages = msg.to_prompt_messages()
+                    for prompt_msg in prompt_messages:
+                        converted.append(
+                            self.type_converter.from_prompt_message(prompt_msg)
+                        )
+
                 self.history.extend(converted, is_prompt=True)
 
-            #
-
-
-
-
-
-
+            # For generic LLMs, extract text and describe non-text content
+            user_text_parts = []
+            for content in last_message.content:
+                if content.type == "text":
+                    user_text_parts.append(content.text)
+                elif content.type == "resource" and hasattr(content.resource, "text"):
+                    user_text_parts.append(content.resource.text)
+                elif content.type == "image":
+                    # Add a placeholder for images
+                    mime_type = getattr(content, "mimeType", "image/unknown")
+                    user_text_parts.append(f"[Image: {mime_type}]")
+
+            user_text = "\n".join(user_text_parts) if user_text_parts else ""
+            if not user_text:
+                # Fallback to original method if we couldn't extract text
+                user_text = str(last_message.content)
 
             return await self.generate_str(user_text)
         else:
```
```diff
@@ -731,10 +758,56 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
 
             # Convert and add all messages to history
             converted = []
-
-
+
+            # Fallback to the original method for all LLM types
+            for msg in multipart_messages:
+                # Convert each PromptMessageMultipart to individual PromptMessages
+                prompt_messages = msg.to_prompt_messages()
+                for prompt_msg in prompt_messages:
+                    converted.append(
+                        self.type_converter.from_prompt_message(prompt_msg)
+                    )
+
             self.history.extend(converted, is_prompt=True)
 
-            # Return the assistant's message
-
-
+            # Return the assistant's message with proper handling of different content types
+            assistant_text_parts = []
+            has_non_text_content = False
+
+            for content in last_message.content:
+                if content.type == "text":
+                    assistant_text_parts.append(content.text)
+                elif content.type == "resource" and hasattr(content.resource, "text"):
+                    # Add resource text with metadata
+                    mime_type = getattr(content.resource, "mimeType", "text/plain")
+                    uri = getattr(content.resource, "uri", "")
+                    if uri:
+                        assistant_text_parts.append(
+                            f"[Resource: {uri}, Type: {mime_type}]\n{content.resource.text}"
+                        )
+                    else:
+                        assistant_text_parts.append(
+                            f"[Resource Type: {mime_type}]\n{content.resource.text}"
+                        )
+                elif content.type == "image":
+                    # Note the presence of images
+                    mime_type = getattr(content, "mimeType", "image/unknown")
+                    assistant_text_parts.append(f"[Image: {mime_type}]")
+                    has_non_text_content = True
+                else:
+                    # Other content types
+                    assistant_text_parts.append(f"[Content of type: {content.type}]")
+                    has_non_text_content = True
+
+            # Join all parts with double newlines for better readability
+            result = (
+                "\n\n".join(assistant_text_parts)
+                if assistant_text_parts
+                else str(last_message.content)
+            )
+
+            # Add a note if non-text content was present
+            if has_non_text_content:
+                result += "\n\n[Note: This message contained non-text content that may not be fully represented in text format]"
+
+            return result
```