fast-agent-mcp 0.1.11__py3-none-any.whl → 0.1.12__py3-none-any.whl
This diff shows the content changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of fast-agent-mcp might be problematic.
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.12.dist-info}/METADATA +1 -1
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.12.dist-info}/RECORD +39 -38
- mcp_agent/agents/agent.py +1 -24
- mcp_agent/app.py +0 -5
- mcp_agent/context.py +0 -2
- mcp_agent/core/agent_app.py +1 -1
- mcp_agent/core/agent_types.py +29 -2
- mcp_agent/core/decorators.py +1 -2
- mcp_agent/core/error_handling.py +1 -1
- mcp_agent/core/factory.py +2 -3
- mcp_agent/core/mcp_content.py +2 -3
- mcp_agent/core/request_params.py +43 -0
- mcp_agent/core/types.py +4 -2
- mcp_agent/core/validation.py +14 -15
- mcp_agent/logging/transport.py +2 -2
- mcp_agent/mcp/interfaces.py +37 -3
- mcp_agent/mcp/mcp_agent_client_session.py +1 -1
- mcp_agent/mcp/mcp_aggregator.py +5 -6
- mcp_agent/mcp/sampling.py +60 -53
- mcp_agent/mcp_server/__init__.py +1 -1
- mcp_agent/resources/examples/prompting/__init__.py +1 -1
- mcp_agent/ui/console_display.py +2 -2
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +2 -2
- mcp_agent/workflows/llm/augmented_llm.py +42 -102
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +4 -3
- mcp_agent/workflows/llm/augmented_llm_openai.py +4 -3
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +33 -4
- mcp_agent/workflows/llm/model_factory.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +42 -28
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +244 -140
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +230 -185
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +5 -204
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +9 -207
- mcp_agent/workflows/llm/sampling_converter.py +124 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +0 -17
- mcp_agent/workflows/router/router_base.py +10 -10
- mcp_agent/workflows/llm/llm_selector.py +0 -345
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.12.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.12.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.12.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py (+5 -204):

@@ -1,27 +1,16 @@
-import
-from
-
-
+from mcp import StopReason
+from mcp_agent.workflows.llm.providers.multipart_converter_anthropic import (
+    AnthropicConverter,
+)
 from mcp_agent.workflows.llm.sampling_format_converter import SamplingFormatConverter
 
 from mcp.types import (
     PromptMessage,
-    EmbeddedResource,
-    ImageContent,
-    TextContent,
-    TextResourceContents,
 )
 
 from anthropic.types import (
-    ContentBlock,
-    DocumentBlockParam,
     Message,
     MessageParam,
-    ImageBlockParam,
-    TextBlock,
-    TextBlockParam,
-    ToolResultBlockParam,
-    ToolUseBlockParam,
 )
 
 from mcp_agent.logging.logger import get_logger
@@ -34,171 +23,10 @@ class AnthropicSamplingConverter(SamplingFormatConverter[MessageParam, Message])
     Convert between Anthropic and MCP types.
     """
 
-    @classmethod
-    def from_sampling_result(cls, result: CreateMessageResult) -> Message:
-        # -> Message
-        if result.role != "assistant":
-            raise ValueError(
-                f"Expected role to be 'assistant' but got '{result.role}' instead."
-            )
-
-        return Message(
-            role="assistant",
-            type="message",
-            content=[mcp_content_to_anthropic_content(result.content)],
-            stop_reason=mcp_stop_reason_to_anthropic_stop_reason(result.stopReason),
-            model=result.model,
-            usage={"input_tokens": 0, "output_tokens": 0},
-            id="sampling_id",
-            # TODO -- incorporate usage info and message identity
-        )
-
-    @classmethod
-    def to_sampling_result(cls, result: Message) -> CreateMessageResult:
-        contents = anthropic_content_to_mcp_content(result.content)
-        if len(contents) > 1:
-            raise NotImplementedError(
-                "Multiple content elements in a single message are not supported in MCP yet"
-            )
-        mcp_content = contents[0]
-
-        # Create a dictionary with required fields
-        result_dict = {
-            "role": result.role,
-            "content": mcp_content,
-            "model": result.model,
-            "stopReason": anthropic_stop_reason_to_mcp_stop_reason(result.stop_reason),
-        }
-
-        # Add any other fields from the original message that might be needed
-        extras = result.model_dump(exclude={"role", "content", "model", "stop_reason"})
-        if extras:
-            # Only include compatible fields to avoid validation errors
-            # Skip fields that would cause validation issues with CreateMessageResult
-            safe_extras = {
-                k: v for k, v in extras.items() if k in CreateMessageResult.model_fields
-            }
-            result_dict.update(safe_extras)
-
-        return CreateMessageResult(**result_dict)
-
-    @classmethod
-    def from_sampling_message(cls, param: SamplingMessage) -> MessageParam:
-        extras = param.model_dump(exclude={"role", "content"})
-        return MessageParam(
-            role=param.role,
-            content=[mcp_content_to_anthropic_content(param.content)],
-            **extras,
-        )
-
-    @classmethod
-    def to_sampling_message(cls, param: MessageParam) -> SamplingMessage:
-        # Implement the conversion from ChatCompletionMessage to MCP message param
-
-        contents = anthropic_content_to_mcp_content(param["content"])
-
-        # TODO: saqadri - the mcp_content can have multiple elements
-        # while sampling message content has a single content element
-        # Right now we error out if there are > 1 elements in mcp_content
-        # We need to handle this case properly going forward
-        if len(contents) > 1:
-            raise NotImplementedError(
-                "Multiple content elements in a single message are not supported"
-            )
-        mcp_content = contents[0]
-
-        # Only include fields that are valid for SamplingMessage
-        extras = {
-            k: v
-            for k, v in param.items()
-            if k not in ["role", "content"] and k in SamplingMessage.model_fields
-        }
-
-        return SamplingMessage(
-            role=param["role"],
-            content=mcp_content,
-            **extras,
-        )
-
     @classmethod
     def from_prompt_message(cls, message: PromptMessage) -> MessageParam:
         """Convert an MCP PromptMessage to an Anthropic MessageParam."""
-
-        # Extract content text
-        content_text = (
-            message.content.text
-            if hasattr(message.content, "text")
-            else str(message.content)
-        )
-
-        # Extract extras for flexibility
-        extras = message.model_dump(exclude={"role", "content"})
-
-        # Handle based on role
-        if message.role == "user":
-            return {"role": "user", "content": content_text, **extras}
-        elif message.role == "assistant":
-            return {
-                "role": "assistant",
-                "content": [{"type": "text", "text": content_text}],
-                **extras,
-            }
-        else:
-            # Fall back to user for any unrecognized role, including "system"
-            _logger.warning(
-                f"Unsupported role '{message.role}' in PromptMessage. Falling back to 'user' role."
-            )
-            return {
-                "role": "user",
-                "content": f"[{message.role.upper()}] {content_text}",
-                **extras,
-            }
-
-
-def anthropic_content_to_mcp_content(
-    content: str
-    | Iterable[
-        TextBlockParam
-        | ImageBlockParam
-        | ToolUseBlockParam
-        | ToolResultBlockParam
-        | DocumentBlockParam
-        | ContentBlock
-    ],
-) -> List[TextContent | ImageContent | EmbeddedResource]:
-    mcp_content = []
-
-    if isinstance(content, str):
-        mcp_content.append(TextContent(type="text", text=content))
-    else:
-        for block in content:
-            if block.type == "text":
-                mcp_content.append(TextContent(type="text", text=block.text))
-            elif block.type == "image":
-                raise NotImplementedError("Image content conversion not implemented")
-            elif block.type == "tool_use":
-                # Best effort to convert a tool use to text (since there's no ToolUseContent)
-                mcp_content.append(
-                    TextContent(
-                        type="text",
-                        text=to_string(block),
-                    )
-                )
-            elif block.type == "tool_result":
-                # Best effort to convert a tool result to text (since there's no ToolResultContent)
-                mcp_content.append(
-                    TextContent(
-                        type="text",
-                        text=to_string(block),
-                    )
-                )
-            elif block.type == "document":
-                raise NotImplementedError("Document content conversion not implemented")
-            else:
-                # Last effort to convert the content to a string
-                mcp_content.append(TextContent(type="text", text=str(block)))
-
-    return mcp_content
+        return AnthropicConverter.convert_prompt_message_to_anthropic(message)
 
 
 def mcp_stop_reason_to_anthropic_stop_reason(stop_reason: StopReason):
@@ -229,30 +57,3 @@ def anthropic_stop_reason_to_mcp_stop_reason(stop_reason: str) -> StopReason:
         return "toolUse"
     else:
         return stop_reason
-
-
-def mcp_content_to_anthropic_content(
-    content: TextContent | ImageContent | EmbeddedResource,
-) -> ContentBlock:
-    if isinstance(content, TextContent):
-        return TextBlock(type=content.type, text=content.text)
-    elif isinstance(content, ImageContent):
-        # Best effort to convert an image to text (since there's no ImageBlock)
-        return TextBlock(type="text", text=f"{content.mimeType}:{content.data}")
-    elif isinstance(content, EmbeddedResource):
-        if isinstance(content.resource, TextResourceContents):
-            return TextBlock(type="text", text=content.resource.text)
-        else:  # BlobResourceContents
-            return TextBlock(
-                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
-            )
-    else:
-        # Last effort to convert the content to a string
-        return TextBlock(type="text", text=str(content))
-
-
-def to_string(obj: BaseModel | dict) -> str:
-    if isinstance(obj, BaseModel):
-        return obj.model_dump_json()
-    else:
-        return json.dumps(obj)
mcp_agent/workflows/llm/providers/sampling_converter_openai.py (+9 -207):

@@ -1,26 +1,15 @@
-from typing import
-
+from typing import Dict, Any
+
 from openai.types.chat import (
     ChatCompletionMessage,
-    ChatCompletionUserMessageParam,
-    ChatCompletionAssistantMessageParam,
-    ChatCompletionMessageParam,
-    ChatCompletionContentPartTextParam,
-    ChatCompletionContentPartParam,
-    ChatCompletionContentPartRefusalParam,
 )
 
 from mcp.types import (
     PromptMessage,
-    TextContent,
-    ImageContent,
-    EmbeddedResource,
-    TextResourceContents,
 )
 
 from mcp_agent.workflows.llm.sampling_format_converter import (
     SamplingFormatConverter,
-    typed_dict_extras,
 )
 
 from mcp_agent.logging.logger import get_logger
@@ -29,201 +18,14 @@ _logger = get_logger(__name__)
 
 
 class OpenAISamplingConverter(
-    SamplingFormatConverter[
+    SamplingFormatConverter[Dict[str, Any], ChatCompletionMessage]
 ):
-    """
-    Convert between OpenAI and MCP types.
-    """
-
-    @classmethod
-    def from_sampling_result(cls, result: CreateMessageResult) -> ChatCompletionMessage:
-        """Convert an MCP message result to an OpenAI ChatCompletionMessage."""
-        # Basic implementation - would need to be expanded
-
-        if result.role != "assistant":
-            raise ValueError(
-                f"Expected role to be 'assistant' but got '{result.role}' instead."
-            )
-        # TODO -- add image support for sampling
-        return ChatCompletionMessage(
-            role=result.role,
-            content=result.content.text or "image",
-        )
-
-    @classmethod
-    def to_sampling_result(cls, result: ChatCompletionMessage) -> CreateMessageResult:
-        """Convert an OpenAI ChatCompletionMessage to an MCP message result."""
-        content = result.content
-        if content is None:
-            content = ""
-
-        return CreateMessageResult(
-            role=result.role,
-            content=TextContent(type="text", text=content),
-            model="unknown",  # Model is required by CreateMessageResult
-        )
-
-    @classmethod
-    def from_sampling_message(
-        cls, param: SamplingMessage
-    ) -> ChatCompletionMessageParam:
-        if param.role == "assistant":
-            return ChatCompletionAssistantMessageParam(
-                role="assistant",
-                content=mcp_to_openai_blocks(param.content),
-            )
-        elif param.role == "user":
-            return ChatCompletionUserMessageParam(
-                role="user",
-                content=mcp_to_openai_blocks(param.content),
-            )
-        else:
-            raise ValueError(
-                f"Unexpected role: {param.role}, MCP only supports 'assistant' and 'user'"
-            )
-
-    @classmethod
-    def to_sampling_message(cls, param: ChatCompletionMessageParam) -> SamplingMessage:
-        contents = openai_to_mcp_blocks(param)
-
-        # TODO: saqadri - the mcp_content can have multiple elements
-        # while sampling message content has a single content element
-        # Right now we error out if there are > 1 elements in mcp_content
-        # We need to handle this case properly going forward
-        if len(contents) > 1:
-            raise NotImplementedError(
-                "Multiple content elements in a single message are not supported"
-            )
-        mcp_content: TextContent | ImageContent | EmbeddedResource = contents[0]
-
-        if param["role"] == "assistant":
-            return SamplingMessage(
-                role="assistant",
-                content=mcp_content,
-                **typed_dict_extras(param, ["role", "content"]),
-            )
-        elif param["role"] == "user":
-            return SamplingMessage(
-                role="user",
-                content=mcp_content,
-                **typed_dict_extras(param, ["role", "content"]),
-            )
-        elif param.role == "tool":
-            raise NotImplementedError(
-                "Tool messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "system":
-            raise NotImplementedError(
-                "System messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "developer":
-            raise NotImplementedError(
-                "Developer messages are not supported in SamplingMessage yet"
-            )
-        elif param.role == "function":
-            raise NotImplementedError(
-                "Function messages are not supported in SamplingMessage yet"
-            )
-        else:
-            raise ValueError(
-                f"Unexpected role: {param.role}, MCP only supports 'assistant', 'user', 'tool', 'system', 'developer', and 'function'"
-            )
-
     @classmethod
-    def from_prompt_message(cls, message: PromptMessage) ->
-        """Convert an MCP PromptMessage to an OpenAI
-
-
-            if hasattr(message.content, "text")
-            else str(message.content)
-        )
-
-        return {
-            "role": message.role,
-            "content": content_text,
-        }
-
-
-def mcp_to_openai_blocks(
-    content: TextContent | ImageContent | EmbeddedResource,
-) -> ChatCompletionContentPartTextParam:
-    if isinstance(content, list):
-        # Handle list of content items
-        return ChatCompletionContentPartTextParam(
-            type="text",
-            text="\n".join(mcp_to_openai_blocks(c) for c in content),
-        )
-
-    if isinstance(content, TextContent):
-        return ChatCompletionContentPartTextParam(type="text", text=content.text)
-    elif isinstance(content, ImageContent):
-        # Best effort to convert an image to text
-        return ChatCompletionContentPartTextParam(
-            type="text", text=f"{content.mimeType}:{content.data}"
+    def from_prompt_message(cls, message: PromptMessage) -> Dict[str, Any]:
+        """Convert an MCP PromptMessage to an OpenAI message dict."""
+        from mcp_agent.workflows.llm.providers.multipart_converter_openai import (
+            OpenAIConverter,
         )
-    elif isinstance(content, EmbeddedResource):
-        if isinstance(content.resource, TextResourceContents):
-            return ChatCompletionContentPartTextParam(
-                type="text", text=content.resource.text
-            )
-        else:  # BlobResourceContents
-            return ChatCompletionContentPartTextParam(
-                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
-            )
-    else:
-        # Last effort to convert the content to a string
-        return ChatCompletionContentPartTextParam(type="text", text=str(content))
-
-
-def openai_to_mcp_blocks(
-    content: str
-    | Iterable[ChatCompletionContentPartParam | ChatCompletionContentPartRefusalParam],
-) -> Iterable[TextContent | ImageContent | EmbeddedResource]:
-    mcp_content = []
-
-    if isinstance(content, str):
-        mcp_content = [TextContent(type="text", text=content)]
-
-    else:
-        mcp_content = [TextContent(type="text", text=content["content"])]
-
-    return mcp_content
-
-    # # TODO: saqadri - this is a best effort conversion, we should handle all possible content types
-    # for c in content["content"]:
-    #     # TODO: evalstate, need to go through all scenarios here
-    #     if isinstance(c, str):
-    #         mcp_content.append(TextContent(type="text", text=c))
-    #         break
 
-
-    #             TextContent(
-    #                 type="text", text=c.text, **typed_dict_extras(c, ["text"])
-    #             )
-    #         )
-    #     elif (
-    #         c.type == "image_url"
-    #     ):  # isinstance(c, ChatCompletionContentPartImageParam):
-    #         raise NotImplementedError("Image content conversion not implemented")
-    #         # TODO: saqadri - need to download the image into a base64-encoded string
-    #         # Download image from c.image_url
-    #         # return ImageContent(
-    #         #     type="image",
-    #         #     data=downloaded_image,
-    #         #     **c
-    #         # )
-    #     elif (
-    #         c.type == "input_audio"
-    #     ):  # isinstance(c, ChatCompletionContentPartInputAudioParam):
-    #         raise NotImplementedError("Audio content conversion not implemented")
-    #     elif (
-    #         c.type == "refusal"
-    #     ):  # isinstance(c, ChatCompletionContentPartRefusalParam):
-    #         mcp_content.append(
-    #             TextContent(
-    #                 type="text", text=c.refusal, **typed_dict_extras(c, ["refusal"])
-    #             )
-    #         )
-    #     else:
-    #         raise ValueError(f"Unexpected content type: {c.type}")
+        # Use the full-featured OpenAI converter for consistent handling
+        return OpenAIConverter.convert_prompt_message_to_openai(message)
mcp_agent/workflows/llm/sampling_converter.py (new file, +124 -0):

@@ -0,0 +1,124 @@
+"""
+Simplified converter between MCP sampling types and PromptMessageMultipart.
+This replaces the more complex provider-specific converters with direct conversions.
+"""
+
+from typing import List, Optional
+
+from mcp.types import (
+    CreateMessageRequestParams,
+    CreateMessageResult,
+    SamplingMessage,
+    StopReason,
+    TextContent,
+)
+
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.mcp.interfaces import RequestParams
+
+
+class SamplingConverter:
+    """
+    Simplified converter between MCP sampling types and internal LLM types.
+
+    This handles converting between:
+    - SamplingMessage and PromptMessageMultipart
+    - CreateMessageRequestParams and RequestParams
+    - LLM responses and CreateMessageResult
+    """
+
+    @staticmethod
+    def sampling_message_to_prompt_message(
+        message: SamplingMessage,
+    ) -> PromptMessageMultipart:
+        """
+        Convert a SamplingMessage to a PromptMessageMultipart.
+
+        Args:
+            message: MCP SamplingMessage to convert
+
+        Returns:
+            PromptMessageMultipart suitable for use with LLMs
+        """
+        return PromptMessageMultipart(role=message.role, content=[message.content])
+
+    @staticmethod
+    def extract_request_params(params: CreateMessageRequestParams) -> RequestParams:
+        """
+        Extract parameters from CreateMessageRequestParams into RequestParams.
+
+        Args:
+            params: MCP request parameters
+
+        Returns:
+            RequestParams suitable for use with LLM.generate_prompt
+        """
+        return RequestParams(
+            maxTokens=params.maxTokens,
+            systemPrompt=params.systemPrompt,
+            temperature=params.temperature,
+            stopSequences=params.stopSequences,
+            modelPreferences=params.modelPreferences,
+            # Add any other parameters needed
+        )
+
+    @staticmethod
+    def create_message_result(
+        response: str, model: str, stop_reason: StopReason = "endTurn"
+    ) -> CreateMessageResult:
+        """
+        Create a CreateMessageResult from an LLM response.
+
+        Args:
+            response: Text response from the LLM
+            model: Model identifier
+            stop_reason: Reason generation stopped
+
+        Returns:
+            CreateMessageResult suitable for returning to MCP
+        """
+        return CreateMessageResult(
+            role="assistant",
+            content=TextContent(type="text", text=response),
+            model=model,
+            stopReason=stop_reason,
+        )
+
+    @staticmethod
+    def error_result(
+        error_message: str, model: Optional[str] = None
+    ) -> CreateMessageResult:
+        """
+        Create an error result.
+
+        Args:
+            error_message: Error message text
+            model: Optional model identifier
+
+        Returns:
+            CreateMessageResult with error information
+        """
+        return CreateMessageResult(
+            role="assistant",
+            content=TextContent(type="text", text=error_message),
+            model=model or "unknown",
+            stopReason="error",
+        )
+
+    @staticmethod
+    def convert_messages(
+        messages: List[SamplingMessage],
+    ) -> List[PromptMessageMultipart]:
+        """
+        Convert multiple SamplingMessages to PromptMessageMultipart objects.
+
+        Args:
+            messages: List of SamplingMessages to convert
+
+        Returns:
+            List of PromptMessageMultipart objects, each with a single content item
+        """
+        return [
+            SamplingConverter.sampling_message_to_prompt_message(msg)
+            for msg in messages
+        ]
mcp_agent/workflows/llm/sampling_format_converter.py (+0 -17):

@@ -1,6 +1,5 @@
 from typing import Generic, List, Protocol, TypeVar
 
-from mcp import CreateMessageResult, SamplingMessage
 
 # Define type variables here instead of importing from augmented_llm
 MessageParamT = TypeVar("MessageParamT")
@@ -13,22 +12,6 @@ MessageT = TypeVar("MessageT")
 class SamplingFormatConverter(Protocol, Generic[MessageParamT, MessageT]):
     """Conversions between LLM provider and MCP types"""
 
-    @classmethod
-    def to_sampling_result(cls, result: MessageT) -> CreateMessageResult:
-        """Convert an LLM response to an MCP message result type."""
-
-    @classmethod
-    def from_sampling_result(cls, result: CreateMessageResult) -> MessageT:
-        """Convert an MCP message result to an LLM response type."""
-
-    @classmethod
-    def to_sampling_message(cls, param: MessageParamT) -> SamplingMessage:
-        """Convert an LLM input to an MCP message (SamplingMessage) type."""
-
-    @classmethod
-    def from_sampling_message(cls, param: SamplingMessage) -> MessageParamT:
-        """Convert an MCP message (SamplingMessage) to an LLM input type."""
-
     @classmethod
     def from_prompt_message(cls, message) -> MessageParamT:
         """Convert an MCP PromptMessage to a provider-specific message parameter."""