fast-agent-mcp 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +37 -9
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +53 -31
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +5 -11
- mcp_agent/core/agent_app.py +125 -44
- mcp_agent/core/decorators.py +3 -2
- mcp_agent/core/enhanced_prompt.py +106 -20
- mcp_agent/core/factory.py +28 -66
- mcp_agent/core/fastagent.py +13 -3
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +41 -36
- mcp_agent/human_input/handler.py +4 -1
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_aggregator.py +27 -22
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +508 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +203 -0
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +5 -4
- mcp_agent/resources/examples/workflows/router.py +0 -2
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +57 -87
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +155 -141
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +135 -281
- mcp_agent/workflows/llm/augmented_llm_openai.py +175 -337
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +104 -0
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +25 -6
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +62 -153
- mcp_agent/workflows/router/router_llm.py +18 -24
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/augmented_llm_passthrough.py (new file)
@@ -0,0 +1,104 @@
from typing import Any, List, Optional, Type, Union

from mcp import GetPromptResult
from mcp.types import PromptMessage
from pydantic_core import from_json
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
from mcp_agent.workflows.llm.augmented_llm import (
    AugmentedLLM,
    MessageParamT,
    MessageT,
    ModelT,
    RequestParams,
)
from mcp_agent.logging.logger import get_logger


class PassthroughLLM(AugmentedLLM):
    """
    A specialized LLM implementation that simply passes through input messages without modification.

    This is useful for cases where you need an object with the AugmentedLLM interface
    but want to preserve the original message without any processing, such as in a
    parallel workflow where no fan-in aggregation is needed.
    """

    def __init__(self, name: str = "Passthrough", context=None, **kwargs):
        super().__init__(name=name, context=context, **kwargs)
        self.provider = "fast-agent"
        # Initialize logger - keep it simple without name reference
        self.logger = get_logger(__name__)
        self._messages = [PromptMessage]

    async def generate(
        self,
        message: Union[str, MessageParamT, List[MessageParamT]],
        request_params: Optional[RequestParams] = None,
    ) -> Union[List[MessageT], Any]:
        """Simply return the input message as is."""
        # Return in the format expected by the caller
        return [message] if isinstance(message, list) else message

    async def generate_str(
        self,
        message: Union[str, MessageParamT, List[MessageParamT]],
        request_params: Optional[RequestParams] = None,
    ) -> str:
        """Return the input message as a string."""
        self.show_user_message(message, model="fastagent-passthrough", chat_turn=0)
        await self.show_assistant_message(message, title="ASSISTANT/PASSTHROUGH")

        return str(message)

    async def generate_structured(
        self,
        message: Union[str, MessageParamT, List[MessageParamT]],
        response_model: Type[ModelT],
        request_params: Optional[RequestParams] = None,
    ) -> ModelT:
        """
        Return the input message as the requested model type.
        This is a best-effort implementation - it may fail if the
        message cannot be converted to the requested model.
        """
        if isinstance(message, response_model):
            return message
        elif isinstance(message, dict):
            return response_model(**message)
        elif isinstance(message, str):
            return response_model.model_validate(from_json(message, allow_partial=True))

    async def generate_prompt(
        self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
    ) -> str:
        return await self.generate_str(prompt.content[0].text, request_params)

    async def apply_prompt_template(
        self, prompt_result: GetPromptResult, prompt_name: str
    ) -> str:
        """
        Apply a prompt template by adding it to the conversation history.
        If the last message in the prompt is from a user, automatically
        generate an assistant response.

        Args:
            prompt_result: The GetPromptResult containing prompt messages
            prompt_name: The name of the prompt being applied

        Returns:
            String representation of the assistant's response if generated,
            or the last assistant message in the prompt
        """
        prompt_messages: List[PromptMessage] = prompt_result.messages

        # Extract arguments if they were stored in the result
        arguments = getattr(prompt_result, "arguments", None)

        # Display information about the loaded prompt
        await self.show_prompt_loaded(
            prompt_name=prompt_name,
            description=prompt_result.description,
            message_count=len(prompt_messages),
            arguments=arguments,
        )
        self._messages = prompt_messages
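In `generate_structured`, a plain string is coerced into the requested Pydantic model via `pydantic_core.from_json` with partial parsing allowed. Below is a minimal standalone sketch of that coercion logic; the `WeatherReport` model and JSON payload are illustrative only and are not part of the package.

```python
from pydantic import BaseModel
from pydantic_core import from_json


class WeatherReport(BaseModel):  # hypothetical response model, for illustration only
    city: str
    temperature_c: float


def coerce(message, response_model):
    """Mirror of the conversion branches in PassthroughLLM.generate_structured."""
    if isinstance(message, response_model):
        return message
    elif isinstance(message, dict):
        return response_model(**message)
    elif isinstance(message, str):
        # allow_partial tolerates truncated JSON at the parsing stage
        return response_model.model_validate(from_json(message, allow_partial=True))


print(coerce('{"city": "Oslo", "temperature_c": 4.5}', WeatherReport))
# WeatherReport(city='Oslo', temperature_c=4.5)
```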
mcp_agent/workflows/llm/augmented_llm_playback.py (new file)
@@ -0,0 +1,109 @@
from typing import List, Optional, Union
from mcp import GetPromptResult
from mcp.types import PromptMessage
from mcp_agent.workflows.llm.augmented_llm import MessageParamT, RequestParams
from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM


# TODO -- support tool calling
class PlaybackLLM(PassthroughLLM):
    """
    A specialized LLM implementation that plays back assistant messages when loaded with prompts.

    Unlike the PassthroughLLM which simply passes through messages without modification,
    PlaybackLLM is designed to simulate a conversation by playing back prompt messages
    in sequence when loaded with prompts through apply_prompt_template.

    After apply_prompts has been called, each call to generate_str returns the next
    "ASSISTANT" message in the loaded messages. If no messages are set or all messages have
    been played back, it returns a message indicating that messages are exhausted.
    """

    def __init__(self, name: str = "Playback", **kwargs):
        super().__init__(name=name, **kwargs)
        self._messages: List[PromptMessage] = []
        self._current_index = 0

    async def generate_str(
        self,
        message: Union[str, MessageParamT, List[MessageParamT]],
        request_params: Optional[RequestParams] = None,
    ) -> str:
        """
        Return the next ASSISTANT message in the loaded messages list.
        If no messages are available or all have been played back,
        returns a message indicating messages are exhausted.

        Note: Only assistant messages are returned; user messages are skipped.
        """
        self.show_user_message(message, model="fastagent-playback", chat_turn=0)

        if not self._messages or self._current_index >= len(self._messages):
            size = len(self._messages) if self._messages else 0
            response = f"MESSAGES EXHAUSTED (list size {size})"
        else:
            response = self._get_next_assistant_message()

        await self.show_assistant_message(response, title="ASSISTANT/PLAYBACK")
        return response

    def _get_next_assistant_message(self) -> str:
        """
        Get the next assistant message from the loaded messages.
        Increments the current message index and skips user messages.
        """
        # Find next assistant message
        while self._current_index < len(self._messages):
            message = self._messages[self._current_index]
            self._current_index += 1

            # Skip non-assistant messages
            if getattr(message, "role", None) != "assistant":
                continue

            # Get content as string
            content = message.content
            if hasattr(content, "text"):
                return content.text
            return str(content)

        # If we get here, we've run out of assistant messages
        return f"MESSAGES EXHAUSTED (list size {len(self._messages)})"

    async def apply_prompt_template(
        self, prompt_result: GetPromptResult, prompt_name: str
    ) -> str:
        """
        Apply a prompt template by adding its messages to the playback queue.

        Args:
            prompt_result: The GetPromptResult containing prompt messages
            prompt_name: The name of the prompt being applied

        Returns:
            String representation of the first message or an indication that no messages were added
        """
        prompt_messages: List[PromptMessage] = prompt_result.messages

        # Extract arguments if they were stored in the result
        arguments = getattr(prompt_result, "arguments", None)

        # Display information about the loaded prompt
        await self.show_prompt_loaded(
            prompt_name=prompt_name,
            description=prompt_result.description,
            message_count=len(prompt_messages),
            arguments=arguments,
        )

        # Add new messages to the end of the existing messages list
        self._messages.extend(prompt_messages)

        if not prompt_messages:
            return "Prompt contains no messages"

        # Reset current index if this is the first time loading messages
        if len(self._messages) == len(prompt_messages):
            self._current_index = 0

        return f"Added {len(prompt_messages)} messages to playback queue"
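The playback queue advances past user messages and returns only assistant content, as the `_get_next_assistant_message` loop above shows. Here is a small self-contained sketch of that skip-and-return behaviour over `mcp.types.PromptMessage` objects; the sample messages are made up for illustration.

```python
from mcp.types import PromptMessage, TextContent

# Hypothetical loaded prompt: one user turn followed by two assistant turns
messages = [
    PromptMessage(role="user", content=TextContent(type="text", text="hello")),
    PromptMessage(role="assistant", content=TextContent(type="text", text="hi there")),
    PromptMessage(role="assistant", content=TextContent(type="text", text="anything else?")),
]

index = 0


def next_assistant_message() -> str:
    """Same skip-and-return loop as PlaybackLLM._get_next_assistant_message."""
    global index
    while index < len(messages):
        message = messages[index]
        index += 1
        if message.role != "assistant":
            continue  # user messages are skipped, not returned
        return message.content.text
    return f"MESSAGES EXHAUSTED (list size {len(messages)})"


print(next_assistant_message())  # "hi there"
print(next_assistant_message())  # "anything else?"
print(next_assistant_message())  # "MESSAGES EXHAUSTED (list size 3)"
```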
mcp_agent/workflows/llm/model_factory.py
@@ -7,10 +7,17 @@ from mcp_agent.core.exceptions import ModelConfigError
 from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
 from mcp_agent.workflows.llm.augmented_llm import RequestParams
-from mcp_agent.workflows.llm.
+from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
+from mcp_agent.workflows.llm.augmented_llm_playback import PlaybackLLM
+
 
 # Type alias for LLM classes
-LLMClass = Union[
+LLMClass = Union[
+    Type[AnthropicAugmentedLLM],
+    Type[OpenAIAugmentedLLM],
+    Type[PassthroughLLM],
+    Type[PlaybackLLM],
+]
 
 
 class Provider(Enum):
@@ -18,7 +25,7 @@ class Provider(Enum):
 
     ANTHROPIC = auto()
     OPENAI = auto()
-
+    FAST_AGENT = auto()
 
 
 class ReasoningEffort(Enum):
@@ -45,6 +52,7 @@ class ModelFactory:
     PROVIDER_MAP = {
         "anthropic": Provider.ANTHROPIC,
         "openai": Provider.OPENAI,
+        "fast-agent": Provider.FAST_AGENT,
     }
 
     # Mapping of effort strings to enum values
@@ -54,11 +62,13 @@
         "high": ReasoningEffort.HIGH,
     }
 
-    # TODO -- add context window size information for display/
+    # TODO -- add context window size information for display/management
     # TODO -- add audio supporting got-4o-audio-preview
     # TODO -- bring model parameter configuration here
     # Mapping of model names to their default providers
    DEFAULT_PROVIDERS = {
+        "passthrough": Provider.FAST_AGENT,
+        "playback": Provider.FAST_AGENT,
         "gpt-4o": Provider.OPENAI,
         "gpt-4o-mini": Provider.OPENAI,
         "o1-mini": Provider.OPENAI,
@@ -93,7 +103,13 @@ class ModelFactory:
     PROVIDER_CLASSES: Dict[Provider, LLMClass] = {
         Provider.ANTHROPIC: AnthropicAugmentedLLM,
         Provider.OPENAI: OpenAIAugmentedLLM,
-        Provider.
+        Provider.FAST_AGENT: PassthroughLLM,
+    }
+
+    # Mapping of special model names to their specific LLM classes
+    # This overrides the provider-based class selection
+    MODEL_SPECIFIC_CLASSES: Dict[str, LLMClass] = {
+        "playback": PlaybackLLM,
     }
 
     @classmethod
@@ -149,7 +165,10 @@ class ModelFactory:
         """
         # Parse configuration up front
        config = cls.parse_model_string(model_string)
-
+        if config.model_name in cls.MODEL_SPECIFIC_CLASSES:
+            llm_class = cls.MODEL_SPECIFIC_CLASSES[config.model_name]
+        else:
+            llm_class = cls.PROVIDER_CLASSES[config.provider]
 
         # Create a factory function matching the attach_llm protocol
         def factory(agent: Agent, **kwargs) -> LLMClass:
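The selection order introduced in `ModelFactory` is: a model-specific override first (the `playback` model name maps straight to `PlaybackLLM`), otherwise the class registered for the model's provider. A standalone sketch of that precedence, using placeholder mappings since only the hunks above are shown:

```python
# Illustrative stand-ins; the real mappings live on ModelFactory in model_factory.py.
PROVIDER_CLASSES = {"fast-agent": "PassthroughLLM", "openai": "OpenAIAugmentedLLM"}
MODEL_SPECIFIC_CLASSES = {"playback": "PlaybackLLM"}


def select_llm_class(model_name: str, provider: str) -> str:
    """Model-specific override wins; otherwise fall back to the provider default."""
    if model_name in MODEL_SPECIFIC_CLASSES:
        return MODEL_SPECIFIC_CLASSES[model_name]
    return PROVIDER_CLASSES[provider]


print(select_llm_class("playback", "fast-agent"))     # PlaybackLLM
print(select_llm_class("passthrough", "fast-agent"))  # PassthroughLLM
print(select_llm_class("gpt-4o", "openai"))           # OpenAIAugmentedLLM
```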
mcp_agent/workflows/llm/openai_utils.py (new file)
@@ -0,0 +1,65 @@
"""
Utility functions for OpenAI integration with MCP.

This file provides backward compatibility with the existing API while
delegating to the proper implementations in the providers/ directory.
"""

from typing import Dict, Any, Union

from openai.types.chat import (
    ChatCompletionMessage,
    ChatCompletionMessageParam,
)

from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
from mcp_agent.workflows.llm.providers.multipart_converter_openai import OpenAIConverter
from mcp_agent.workflows.llm.providers.openai_multipart import (
    openai_to_multipart,
)


def openai_message_to_prompt_message_multipart(
    message: Union[ChatCompletionMessage, Dict[str, Any]],
) -> PromptMessageMultipart:
    """
    Convert an OpenAI ChatCompletionMessage to a PromptMessageMultipart.

    Args:
        message: The OpenAI message to convert (can be an actual ChatCompletionMessage
            or a dictionary with the same structure)

    Returns:
        A PromptMessageMultipart representation
    """
    return openai_to_multipart(message)


def openai_message_param_to_prompt_message_multipart(
    message_param: ChatCompletionMessageParam,
) -> PromptMessageMultipart:
    """
    Convert an OpenAI ChatCompletionMessageParam to a PromptMessageMultipart.

    Args:
        message_param: The OpenAI message param to convert

    Returns:
        A PromptMessageMultipart representation
    """
    return openai_to_multipart(message_param)


def prompt_message_multipart_to_openai_message_param(
    multipart: PromptMessageMultipart,
) -> ChatCompletionMessageParam:
    """
    Convert a PromptMessageMultipart to an OpenAI ChatCompletionMessageParam.

    Args:
        multipart: The PromptMessageMultipart to convert

    Returns:
        An OpenAI ChatCompletionMessageParam representation
    """
    return OpenAIConverter.convert_to_openai(multipart)
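These wrappers delegate conversion between OpenAI chat messages and `PromptMessageMultipart` to the new `providers/` modules. A hedged usage sketch, assuming fast-agent-mcp 0.1.9 is installed; the exact round-trip output depends on the converter implementations, which are not part of this hunk:

```python
from mcp_agent.workflows.llm.openai_utils import (
    openai_message_to_prompt_message_multipart,
    prompt_message_multipart_to_openai_message_param,
)

# A dict shaped like a ChatCompletionMessage (the signatures above accept dicts too)
assistant_message = {"role": "assistant", "content": "The answer is 42."}

multipart = openai_message_to_prompt_message_multipart(assistant_message)
param = prompt_message_multipart_to_openai_message_param(multipart)
print(param)  # an OpenAI-shaped message param rebuilt from the multipart form
```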
mcp_agent/workflows/llm/providers/__init__.py (new file)
@@ -0,0 +1,8 @@
from mcp_agent.workflows.llm.providers.sampling_converter_anthropic import (
    AnthropicSamplingConverter,
)
from mcp_agent.workflows.llm.providers.sampling_converter_openai import (
    OpenAISamplingConverter,
)

__all__ = ["AnthropicSamplingConverter", "OpenAISamplingConverter"]