fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/METADATA +27 -4
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/RECORD +51 -30
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +114 -8
- mcp_agent/context.py +0 -2
- mcp_agent/core/agent_app.py +89 -13
- mcp_agent/core/factory.py +14 -13
- mcp_agent/core/fastagent.py +15 -5
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +79 -36
- mcp_agent/logging/listeners.py +3 -6
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_agent_client_session.py +21 -145
- mcp_agent/mcp/mcp_aggregator.py +61 -12
- mcp_agent/mcp/mcp_connection_manager.py +0 -1
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +509 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +223 -0
- mcp_agent/mcp/stdio.py +23 -15
- mcp_agent/mcp_server_registry.py +5 -2
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/resources/examples/workflows/orchestrator.py +3 -3
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +139 -66
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
- mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +99 -1
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +20 -3
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py
@@ -0,0 +1,258 @@
+import json
+from typing import Iterable, List
+from mcp import CreateMessageResult, SamplingMessage, StopReason
+from pydantic import BaseModel
+from mcp_agent.workflows.llm.sampling_format_converter import SamplingFormatConverter
+
+from mcp.types import (
+    PromptMessage,
+    EmbeddedResource,
+    ImageContent,
+    TextContent,
+    TextResourceContents,
+)
+
+from anthropic.types import (
+    ContentBlock,
+    DocumentBlockParam,
+    Message,
+    MessageParam,
+    ImageBlockParam,
+    TextBlock,
+    TextBlockParam,
+    ToolResultBlockParam,
+    ToolUseBlockParam,
+)
+
+from mcp_agent.logging.logger import get_logger
+
+_logger = get_logger(__name__)
+
+
+class AnthropicSamplingConverter(SamplingFormatConverter[MessageParam, Message]):
+    """
+    Convert between Anthropic and MCP types.
+    """
+
+    @classmethod
+    def from_sampling_result(cls, result: CreateMessageResult) -> Message:
+        # -> Message
+        if result.role != "assistant":
+            raise ValueError(
+                f"Expected role to be 'assistant' but got '{result.role}' instead."
+            )
+
+        return Message(
+            role="assistant",
+            type="message",
+            content=[mcp_content_to_anthropic_content(result.content)],
+            stop_reason=mcp_stop_reason_to_anthropic_stop_reason(result.stopReason),
+            model=result.model,
+            usage={"input_tokens": 0, "output_tokens": 0},
+            id="sampling_id",
+            # TODO -- incorporate usage info and message identity
+        )
+
+    @classmethod
+    def to_sampling_result(cls, result: Message) -> CreateMessageResult:
+        contents = anthropic_content_to_mcp_content(result.content)
+        if len(contents) > 1:
+            raise NotImplementedError(
+                "Multiple content elements in a single message are not supported in MCP yet"
+            )
+        mcp_content = contents[0]
+
+        # Create a dictionary with required fields
+        result_dict = {
+            "role": result.role,
+            "content": mcp_content,
+            "model": result.model,
+            "stopReason": anthropic_stop_reason_to_mcp_stop_reason(result.stop_reason),
+        }
+
+        # Add any other fields from the original message that might be needed
+        extras = result.model_dump(exclude={"role", "content", "model", "stop_reason"})
+        if extras:
+            # Only include compatible fields to avoid validation errors
+            # Skip fields that would cause validation issues with CreateMessageResult
+            safe_extras = {
+                k: v for k, v in extras.items() if k in CreateMessageResult.model_fields
+            }
+            result_dict.update(safe_extras)
+
+        return CreateMessageResult(**result_dict)
+
+    @classmethod
+    def from_sampling_message(cls, param: SamplingMessage) -> MessageParam:
+        extras = param.model_dump(exclude={"role", "content"})
+        return MessageParam(
+            role=param.role,
+            content=[mcp_content_to_anthropic_content(param.content)],
+            **extras,
+        )
+
+    @classmethod
+    def to_sampling_message(cls, param: MessageParam) -> SamplingMessage:
+        # Implement the conversion from ChatCompletionMessage to MCP message param
+
+        contents = anthropic_content_to_mcp_content(param["content"])
+
+        # TODO: saqadri - the mcp_content can have multiple elements
+        # while sampling message content has a single content element
+        # Right now we error out if there are > 1 elements in mcp_content
+        # We need to handle this case properly going forward
+        if len(contents) > 1:
+            raise NotImplementedError(
+                "Multiple content elements in a single message are not supported"
+            )
+        mcp_content = contents[0]
+
+        # Only include fields that are valid for SamplingMessage
+        extras = {
+            k: v
+            for k, v in param.items()
+            if k not in ["role", "content"] and k in SamplingMessage.model_fields
+        }
+
+        return SamplingMessage(
+            role=param["role"],
+            content=mcp_content,
+            **extras,
+        )
+
+    @classmethod
+    def from_prompt_message(cls, message: PromptMessage) -> MessageParam:
+        """Convert an MCP PromptMessage to an Anthropic MessageParam."""
+
+        # Extract content text
+        content_text = (
+            message.content.text
+            if hasattr(message.content, "text")
+            else str(message.content)
+        )
+
+        # Extract extras for flexibility
+        extras = message.model_dump(exclude={"role", "content"})
+
+        # Handle based on role
+        if message.role == "user":
+            return {"role": "user", "content": content_text, **extras}
+        elif message.role == "assistant":
+            return {
+                "role": "assistant",
+                "content": [{"type": "text", "text": content_text}],
+                **extras,
+            }
+        else:
+            # Fall back to user for any unrecognized role, including "system"
+            _logger.warning(
+                f"Unsupported role '{message.role}' in PromptMessage. Falling back to 'user' role."
+            )
+            return {
+                "role": "user",
+                "content": f"[{message.role.upper()}] {content_text}",
+                **extras,
+            }
+
+
+def anthropic_content_to_mcp_content(
+    content: str
+    | Iterable[
+        TextBlockParam
+        | ImageBlockParam
+        | ToolUseBlockParam
+        | ToolResultBlockParam
+        | DocumentBlockParam
+        | ContentBlock
+    ],
+) -> List[TextContent | ImageContent | EmbeddedResource]:
+    mcp_content = []
+
+    if isinstance(content, str):
+        mcp_content.append(TextContent(type="text", text=content))
+    else:
+        for block in content:
+            if block.type == "text":
+                mcp_content.append(TextContent(type="text", text=block.text))
+            elif block.type == "image":
+                raise NotImplementedError("Image content conversion not implemented")
+            elif block.type == "tool_use":
+                # Best effort to convert a tool use to text (since there's no ToolUseContent)
+                mcp_content.append(
+                    TextContent(
+                        type="text",
+                        text=to_string(block),
+                    )
+                )
+            elif block.type == "tool_result":
+                # Best effort to convert a tool result to text (since there's no ToolResultContent)
+                mcp_content.append(
+                    TextContent(
+                        type="text",
+                        text=to_string(block),
+                    )
+                )
+            elif block.type == "document":
+                raise NotImplementedError("Document content conversion not implemented")
+            else:
+                # Last effort to convert the content to a string
+                mcp_content.append(TextContent(type="text", text=str(block)))
+
+    return mcp_content
+
+
+def mcp_stop_reason_to_anthropic_stop_reason(stop_reason: StopReason):
+    if not stop_reason:
+        return None
+    elif stop_reason == "endTurn":
+        return "end_turn"
+    elif stop_reason == "maxTokens":
+        return "max_tokens"
+    elif stop_reason == "stopSequence":
+        return "stop_sequence"
+    elif stop_reason == "toolUse":
+        return "tool_use"
+    else:
+        return stop_reason
+
+
+def anthropic_stop_reason_to_mcp_stop_reason(stop_reason: str) -> StopReason:
+    if not stop_reason:
+        return None
+    elif stop_reason == "end_turn":
+        return "endTurn"
+    elif stop_reason == "max_tokens":
+        return "maxTokens"
+    elif stop_reason == "stop_sequence":
+        return "stopSequence"
+    elif stop_reason == "tool_use":
+        return "toolUse"
+    else:
+        return stop_reason
+
+
+def mcp_content_to_anthropic_content(
+    content: TextContent | ImageContent | EmbeddedResource,
+) -> ContentBlock:
+    if isinstance(content, TextContent):
+        return TextBlock(type=content.type, text=content.text)
+    elif isinstance(content, ImageContent):
+        # Best effort to convert an image to text (since there's no ImageBlock)
+        return TextBlock(type="text", text=f"{content.mimeType}:{content.data}")
+    elif isinstance(content, EmbeddedResource):
+        if isinstance(content.resource, TextResourceContents):
+            return TextBlock(type="text", text=content.resource.text)
+        else:  # BlobResourceContents
+            return TextBlock(
+                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
+            )
+    else:
+        # Last effort to convert the content to a string
+        return TextBlock(type="text", text=str(content))
+
+
+def to_string(obj: BaseModel | dict) -> str:
+    if isinstance(obj, BaseModel):
+        return obj.model_dump_json()
+    else:
+        return json.dumps(obj)
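A minimal usage sketch (not part of the diff) exercising two of the module-level helpers added above; it assumes the import path shown in the file list (mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py) and that the anthropic package is installed.

    from mcp.types import TextContent
    from mcp_agent.workflows.llm.providers.sampling_converter_anthropic import (
        mcp_content_to_anthropic_content,
        mcp_stop_reason_to_anthropic_stop_reason,
    )

    # TextContent maps directly to an Anthropic TextBlock.
    block = mcp_content_to_anthropic_content(TextContent(type="text", text="hello"))
    assert block.text == "hello"

    # MCP camelCase stop reasons map to Anthropic snake_case equivalents.
    assert mcp_stop_reason_to_anthropic_stop_reason("endTurn") == "end_turn"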
mcp_agent/workflows/llm/providers/sampling_converter_openai.py
@@ -0,0 +1,229 @@
+from typing import Iterable
+from mcp import CreateMessageResult, SamplingMessage
+from openai.types.chat import (
+    ChatCompletionMessage,
+    ChatCompletionUserMessageParam,
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionMessageParam,
+    ChatCompletionContentPartTextParam,
+    ChatCompletionContentPartParam,
+    ChatCompletionContentPartRefusalParam,
+)
+
+from mcp.types import (
+    PromptMessage,
+    TextContent,
+    ImageContent,
+    EmbeddedResource,
+    TextResourceContents,
+)
+
+from mcp_agent.workflows.llm.sampling_format_converter import (
+    SamplingFormatConverter,
+    typed_dict_extras,
+)
+
+from mcp_agent.logging.logger import get_logger
+
+_logger = get_logger(__name__)
+
+
+class OpenAISamplingConverter(
+    SamplingFormatConverter[ChatCompletionMessageParam, ChatCompletionMessage]
+):
+    """
+    Convert between OpenAI and MCP types.
+    """
+
+    @classmethod
+    def from_sampling_result(cls, result: CreateMessageResult) -> ChatCompletionMessage:
+        """Convert an MCP message result to an OpenAI ChatCompletionMessage."""
+        # Basic implementation - would need to be expanded
+
+        if result.role != "assistant":
+            raise ValueError(
+                f"Expected role to be 'assistant' but got '{result.role}' instead."
+            )
+        # TODO -- add image support for sampling
+        return ChatCompletionMessage(
+            role=result.role,
+            content=result.content.text or "image",
+        )
+
+    @classmethod
+    def to_sampling_result(cls, result: ChatCompletionMessage) -> CreateMessageResult:
+        """Convert an OpenAI ChatCompletionMessage to an MCP message result."""
+        content = result.content
+        if content is None:
+            content = ""
+
+        return CreateMessageResult(
+            role=result.role,
+            content=TextContent(type="text", text=content),
+            model="unknown",  # Model is required by CreateMessageResult
+        )
+
+    @classmethod
+    def from_sampling_message(
+        cls, param: SamplingMessage
+    ) -> ChatCompletionMessageParam:
+        if param.role == "assistant":
+            return ChatCompletionAssistantMessageParam(
+                role="assistant",
+                content=mcp_to_openai_blocks(param.content),
+            )
+        elif param.role == "user":
+            return ChatCompletionUserMessageParam(
+                role="user",
+                content=mcp_to_openai_blocks(param.content),
+            )
+        else:
+            raise ValueError(
+                f"Unexpected role: {param.role}, MCP only supports 'assistant' and 'user'"
+            )
+
+    @classmethod
+    def to_sampling_message(cls, param: ChatCompletionMessageParam) -> SamplingMessage:
+        contents = openai_to_mcp_blocks(param)
+
+        # TODO: saqadri - the mcp_content can have multiple elements
+        # while sampling message content has a single content element
+        # Right now we error out if there are > 1 elements in mcp_content
+        # We need to handle this case properly going forward
+        if len(contents) > 1:
+            raise NotImplementedError(
+                "Multiple content elements in a single message are not supported"
+            )
+        mcp_content: TextContent | ImageContent | EmbeddedResource = contents[0]
+
+        if param["role"] == "assistant":
+            return SamplingMessage(
+                role="assistant",
+                content=mcp_content,
+                **typed_dict_extras(param, ["role", "content"]),
+            )
+        elif param["role"] == "user":
+            return SamplingMessage(
+                role="user",
+                content=mcp_content,
+                **typed_dict_extras(param, ["role", "content"]),
+            )
+        elif param.role == "tool":
+            raise NotImplementedError(
+                "Tool messages are not supported in SamplingMessage yet"
+            )
+        elif param.role == "system":
+            raise NotImplementedError(
+                "System messages are not supported in SamplingMessage yet"
+            )
+        elif param.role == "developer":
+            raise NotImplementedError(
+                "Developer messages are not supported in SamplingMessage yet"
+            )
+        elif param.role == "function":
+            raise NotImplementedError(
+                "Function messages are not supported in SamplingMessage yet"
+            )
+        else:
+            raise ValueError(
+                f"Unexpected role: {param.role}, MCP only supports 'assistant', 'user', 'tool', 'system', 'developer', and 'function'"
+            )
+
+    @classmethod
+    def from_prompt_message(cls, message: PromptMessage) -> ChatCompletionMessageParam:
+        """Convert an MCP PromptMessage to an OpenAI ChatCompletionMessageParam."""
+        content_text = (
+            message.content.text
+            if hasattr(message.content, "text")
+            else str(message.content)
+        )
+
+        return {
+            "role": message.role,
+            "content": content_text,
+        }
+
+
+def mcp_to_openai_blocks(
+    content: TextContent | ImageContent | EmbeddedResource,
+) -> ChatCompletionContentPartTextParam:
+    if isinstance(content, list):
+        # Handle list of content items
+        return ChatCompletionContentPartTextParam(
+            type="text",
+            text="\n".join(mcp_to_openai_blocks(c) for c in content),
+        )
+
+    if isinstance(content, TextContent):
+        return ChatCompletionContentPartTextParam(type="text", text=content.text)
+    elif isinstance(content, ImageContent):
+        # Best effort to convert an image to text
+        return ChatCompletionContentPartTextParam(
+            type="text", text=f"{content.mimeType}:{content.data}"
+        )
+    elif isinstance(content, EmbeddedResource):
+        if isinstance(content.resource, TextResourceContents):
+            return ChatCompletionContentPartTextParam(
+                type="text", text=content.resource.text
+            )
+        else:  # BlobResourceContents
+            return ChatCompletionContentPartTextParam(
+                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
+            )
+    else:
+        # Last effort to convert the content to a string
+        return ChatCompletionContentPartTextParam(type="text", text=str(content))
+
+
+def openai_to_mcp_blocks(
+    content: str
+    | Iterable[ChatCompletionContentPartParam | ChatCompletionContentPartRefusalParam],
+) -> Iterable[TextContent | ImageContent | EmbeddedResource]:
+    mcp_content = []
+
+    if isinstance(content, str):
+        mcp_content = [TextContent(type="text", text=content)]
+
+    else:
+        mcp_content = [TextContent(type="text", text=content["content"])]
+
+    return mcp_content
+
+    # # TODO: saqadri - this is a best effort conversion, we should handle all possible content types
+    # for c in content["content"]:
+    #     # TODO: evalstate, need to go through all scenarios here
+    #     if isinstance(c, str):
+    #         mcp_content.append(TextContent(type="text", text=c))
+    #         break
+
+    #     if c.type == "text":  # isinstance(c, ChatCompletionContentPartTextParam):
+    #         mcp_content.append(
+    #             TextContent(
+    #                 type="text", text=c.text, **typed_dict_extras(c, ["text"])
+    #             )
+    #         )
+    #     elif (
+    #         c.type == "image_url"
+    #     ):  # isinstance(c, ChatCompletionContentPartImageParam):
+    #         raise NotImplementedError("Image content conversion not implemented")
+    #         # TODO: saqadri - need to download the image into a base64-encoded string
+    #         # Download image from c.image_url
+    #         # return ImageContent(
+    #         #     type="image",
+    #         #     data=downloaded_image,
+    #         #     **c
+    #         # )
+    #     elif (
+    #         c.type == "input_audio"
+    #     ):  # isinstance(c, ChatCompletionContentPartInputAudioParam):
+    #         raise NotImplementedError("Audio content conversion not implemented")
+    #     elif (
+    #         c.type == "refusal"
+    #     ):  # isinstance(c, ChatCompletionContentPartRefusalParam):
+    #         mcp_content.append(
+    #             TextContent(
+    #                 type="text", text=c.refusal, **typed_dict_extras(c, ["refusal"])
+    #             )
+    #         )
+    #     else:
+    #         raise ValueError(f"Unexpected content type: {c.type}")
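For illustration only (not part of the diff): a sketch of how `OpenAISamplingConverter.from_prompt_message`, as defined above, flattens an MCP PromptMessage into a plain role/content dict.

    from mcp.types import PromptMessage, TextContent
    from mcp_agent.workflows.llm.providers.sampling_converter_openai import (
        OpenAISamplingConverter,
    )

    msg = PromptMessage(role="user", content=TextContent(type="text", text="hi"))
    param = OpenAISamplingConverter.from_prompt_message(msg)
    # Text content is extracted; the role passes through unchanged.
    assert param == {"role": "user", "content": "hi"}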
mcp_agent/workflows/llm/sampling_format_converter.py
@@ -0,0 +1,39 @@
+from typing import Generic, List, Protocol, TypeVar
+
+from mcp import CreateMessageResult, SamplingMessage
+
+# Define type variables here instead of importing from augmented_llm
+MessageParamT = TypeVar("MessageParamT")
+"""A type representing an input message to an LLM."""
+
+MessageT = TypeVar("MessageT")
+"""A type representing an output message from an LLM."""
+
+
+class SamplingFormatConverter(Protocol, Generic[MessageParamT, MessageT]):
+    """Conversions between LLM provider and MCP types"""
+
+    @classmethod
+    def to_sampling_result(cls, result: MessageT) -> CreateMessageResult:
+        """Convert an LLM response to an MCP message result type."""
+
+    @classmethod
+    def from_sampling_result(cls, result: CreateMessageResult) -> MessageT:
+        """Convert an MCP message result to an LLM response type."""
+
+    @classmethod
+    def to_sampling_message(cls, param: MessageParamT) -> SamplingMessage:
+        """Convert an LLM input to an MCP message (SamplingMessage) type."""
+
+    @classmethod
+    def from_sampling_message(cls, param: SamplingMessage) -> MessageParamT:
+        """Convert an MCP message (SamplingMessage) to an LLM input type."""
+
+    @classmethod
+    def from_prompt_message(cls, message) -> MessageParamT:
+        """Convert an MCP PromptMessage to a provider-specific message parameter."""
+
+
+def typed_dict_extras(d: dict, exclude: List[str]):
+    extras = {k: v for k, v in d.items() if k not in exclude}
+    return extras
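A short sketch (again, illustration rather than part of the release) of the `typed_dict_extras` helper defined at the bottom of sampling_format_converter.py; it simply filters the excluded keys out of a TypedDict-like dict, which is how the provider converters above forward leftover fields.

    from mcp_agent.workflows.llm.sampling_format_converter import typed_dict_extras

    param = {"role": "user", "content": "hi", "name": "example"}
    # Only keys outside the exclude list survive.
    assert typed_dict_extras(param, ["role", "content"]) == {"name": "example"}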
mcp_agent/core/server_validation.py
@@ -1,44 +0,0 @@
-"""FastAgent validation methods."""
-
-from mcp_agent.core.exceptions import ServerConfigError
-
-
-def _validate_server_references(self) -> None:
-    """
-    Validate that all server references in agent configurations exist in config.
-    Raises ServerConfigError if any referenced servers are not defined.
-    """
-    # First check if any agents need servers
-    agents_needing_servers = {
-        name: agent_data["config"].servers
-        for name, agent_data in self.agents.items()
-        if agent_data["config"].servers
-    }
-
-    if not agents_needing_servers:
-        return  # No validation needed
-
-    # If we need servers, verify MCP config exists
-    if not hasattr(self.context.config, "mcp"):
-        raise ServerConfigError(
-            "MCP configuration missing",
-            "Agents require server access but no MCP configuration found.\n"
-            "Add an 'mcp' section to your configuration file.",
-        )
-
-    if not self.context.config.mcp.servers:
-        raise ServerConfigError(
-            "No MCP servers configured",
-            "Agents require server access but no servers are defined.\n"
-            "Add server definitions under mcp.servers in your configuration file.",
-        )
-
-    # Now check each agent's servers exist
-    available_servers = set(self.context.config.mcp.servers.keys())
-    for name, servers in agents_needing_servers.items():
-        missing = [s for s in servers if s not in available_servers]
-        if missing:
-            raise ServerConfigError(
-                f"Missing server configuration for agent '{name}'",
-                f"The following servers are referenced but not defined in config: {', '.join(missing)}",
-            )
mcp_agent/core/simulator_registry.py
@@ -1,22 +0,0 @@
-from typing import Optional, Any
-
-
-class SimulatorRegistry:
-    """Registry to access simulator instances for testing assertions"""
-
-    _instances = {}
-
-    @classmethod
-    def register(cls, name: str, simulator: "Any"):
-        """Register a simulator instance"""
-        cls._instances[name] = simulator
-
-    @classmethod
-    def get(cls, name: str) -> Optional["Any"]:
-        """Get a simulator by name"""
-        return cls._instances.get(name)
-
-    @classmethod
-    def clear(cls):
-        """Clear registry (useful between tests)"""
-        cls._instances.clear()
mcp_agent/workflows/llm/enhanced_passthrough.py
@@ -1,70 +0,0 @@
-# src/mcp_agent/workflows/llm/enhanced_passthrough.py
-
-
-import datetime
-from typing import List, Optional, Union
-from mcp_agent.core.simulator_registry import SimulatorRegistry
-from mcp_agent.workflows.llm.augmented_llm import (
-    AugmentedLLM,
-    MessageParamT,
-    RequestParams,
-)
-
-
-class EnhancedPassthroughLLM(AugmentedLLM):
-    """Enhanced passthrough LLM for testing parameter handling and workflows"""
-
-    def __init__(self, name: str = "Simulator", context=None, **kwargs):
-        super().__init__(name=name, context=context, **kwargs)
-        self.simulation_mode = kwargs.get("simulation_mode", "passthrough")
-        self.request_log = []
-        self.last_request_params = None
-
-        # Register this instance with the registry
-        SimulatorRegistry.register(self.name, self)
-
-    async def generate_str(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        request_params: Optional[RequestParams] = None,
-    ) -> str:
-        """Capture parameters and log the request"""
-        # Store for assertion testing
-        self.last_request_params = request_params
-
-        # Log the request
-        self.request_log.append(
-            {
-                "timestamp": datetime.now().isoformat(),
-                "message": str(message),
-                "request_params": request_params.model_dump()
-                if request_params
-                else None,
-            }
-        )
-
-        # Display for debugging
-        self.show_user_message(str(message), model="simulator", chat_turn=0)
-
-        # Simulate response
-        result = f"[SIMULATOR] Response to: {message}"
-        await self.show_assistant_message(result, title="SIMULATOR")
-
-        return result
-
-    # Other generate methods with similar parameter capture
-
-    def get_parameter_usage_report(self):
-        """Generate report of parameter usage"""
-        param_usage = {}
-
-        for req in self.request_log:
-            params = req.get("request_params", {})
-            if params:
-                for key, value in params.items():
-                    if key not in param_usage:
-                        param_usage[key] = {"count": 0, "values": set()}
-                    param_usage[key]["count"] += 1
-                    param_usage[key]["values"].add(str(value))
-
-        return {"total_requests": len(self.request_log), "parameter_usage": param_usage}
{fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/WHEEL: file without changes
{fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.10.dist-info}/licenses/LICENSE: file without changes