fast-agent-mcp 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff compares two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
- fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
- mcp_agent/agents/agent.py +37 -102
- mcp_agent/app.py +16 -27
- mcp_agent/cli/commands/bootstrap.py +22 -52
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +11 -26
- mcp_agent/cli/main.py +6 -9
- mcp_agent/cli/terminal.py +2 -2
- mcp_agent/config.py +1 -5
- mcp_agent/context.py +13 -26
- mcp_agent/context_dependent.py +3 -7
- mcp_agent/core/agent_app.py +46 -122
- mcp_agent/core/agent_types.py +29 -2
- mcp_agent/core/agent_utils.py +3 -5
- mcp_agent/core/decorators.py +6 -14
- mcp_agent/core/enhanced_prompt.py +25 -52
- mcp_agent/core/error_handling.py +1 -1
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/factory.py +30 -72
- mcp_agent/core/fastagent.py +48 -88
- mcp_agent/core/mcp_content.py +10 -19
- mcp_agent/core/prompt.py +8 -15
- mcp_agent/core/proxies.py +34 -25
- mcp_agent/core/request_params.py +46 -0
- mcp_agent/core/types.py +6 -6
- mcp_agent/core/validation.py +16 -16
- mcp_agent/executor/decorator_registry.py +11 -23
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +28 -74
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +17 -29
- mcp_agent/human_input/handler.py +4 -9
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +15 -17
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +24 -24
- mcp_agent/mcp/gen_client.py +4 -12
- mcp_agent/mcp/interfaces.py +107 -88
- mcp_agent/mcp/mcp_agent_client_session.py +11 -19
- mcp_agent/mcp/mcp_agent_server.py +8 -10
- mcp_agent/mcp/mcp_aggregator.py +49 -122
- mcp_agent/mcp/mcp_connection_manager.py +16 -37
- mcp_agent/mcp/prompt_message_multipart.py +12 -18
- mcp_agent/mcp/prompt_serialization.py +13 -38
- mcp_agent/mcp/prompts/prompt_load.py +99 -0
- mcp_agent/mcp/prompts/prompt_server.py +21 -128
- mcp_agent/mcp/prompts/prompt_template.py +20 -42
- mcp_agent/mcp/resource_utils.py +8 -17
- mcp_agent/mcp/sampling.py +62 -64
- mcp_agent/mcp/stdio.py +11 -8
- mcp_agent/mcp_server/__init__.py +1 -1
- mcp_agent/mcp_server/agent_server.py +10 -17
- mcp_agent/mcp_server_registry.py +13 -35
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +2 -1
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/prompting/__init__.py +1 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +5 -11
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
- mcp_agent/resources/examples/researcher/researcher.py +2 -1
- mcp_agent/resources/examples/workflows/agent_build.py +2 -1
- mcp_agent/resources/examples/workflows/chaining.py +2 -1
- mcp_agent/resources/examples/workflows/evaluator.py +2 -1
- mcp_agent/resources/examples/workflows/human_input.py +2 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
- mcp_agent/resources/examples/workflows/parallel.py +2 -1
- mcp_agent/resources/examples/workflows/router.py +2 -1
- mcp_agent/resources/examples/workflows/sse.py +1 -1
- mcp_agent/telemetry/usage_tracking.py +2 -1
- mcp_agent/ui/console_display.py +17 -41
- mcp_agent/workflows/embedding/embedding_base.py +1 -4
- mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
- mcp_agent/workflows/embedding/embedding_openai.py +4 -13
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
- mcp_agent/workflows/llm/anthropic_utils.py +8 -29
- mcp_agent/workflows/llm/augmented_llm.py +94 -332
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +43 -76
- mcp_agent/workflows/llm/augmented_llm_openai.py +46 -100
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +42 -20
- mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
- mcp_agent/workflows/llm/memory.py +103 -0
- mcp_agent/workflows/llm/model_factory.py +9 -21
- mcp_agent/workflows/llm/openai_utils.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +39 -27
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +246 -184
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +212 -202
- mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +11 -212
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +13 -215
- mcp_agent/workflows/llm/sampling_converter.py +117 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +12 -29
- mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
- mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
- mcp_agent/workflows/parallel/fan_in.py +17 -47
- mcp_agent/workflows/parallel/fan_out.py +6 -12
- mcp_agent/workflows/parallel/parallel_llm.py +9 -26
- mcp_agent/workflows/router/router_base.py +29 -59
- mcp_agent/workflows/router/router_embedding.py +11 -25
- mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
- mcp_agent/workflows/router/router_embedding_openai.py +2 -2
- mcp_agent/workflows/router/router_llm.py +12 -28
- mcp_agent/workflows/swarm/swarm.py +20 -48
- mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
- mcp_agent/workflows/swarm/swarm_openai.py +2 -2
- fast_agent_mcp-0.1.11.dist-info/RECORD +0 -160
- mcp_agent/workflows/llm/llm_selector.py +0 -345
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0
Diff for mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py:

@@ -1,38 +1,42 @@
-from typing import List,
+from typing import TYPE_CHECKING, List, Optional, Sequence, Union
 
+from anthropic.types import (
+    Base64ImageSourceParam,
+    Base64PDFSourceParam,
+    ContentBlockParam,
+    DocumentBlockParam,
+    ImageBlockParam,
+    MessageParam,
+    PlainTextSourceParam,
+    TextBlockParam,
+    ToolResultBlockParam,
+    URLImageSourceParam,
+    URLPDFSourceParam,
+)
 from mcp.types import (
-
-    ImageContent,
-    EmbeddedResource,
+    BlobResourceContents,
     CallToolResult,
+    EmbeddedResource,
+    ImageContent,
+    PromptMessage,
+    TextContent,
     TextResourceContents,
-    BlobResourceContents,
 )
-
-from mcp_agent.
+
+from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.mime_utils import (
     guess_mime_type,
-    is_text_mime_type,
     is_image_mime_type,
+    is_text_mime_type,
 )
-
-from anthropic.types import (
-    MessageParam,
-    TextBlockParam,
-    ImageBlockParam,
-    DocumentBlockParam,
-    Base64ImageSourceParam,
-    URLImageSourceParam,
-    Base64PDFSourceParam,
-    URLPDFSourceParam,
-    PlainTextSourceParam,
-    ToolResultBlockParam,
-    ContentBlockParam,
-)
-from mcp_agent.logging.logger import get_logger
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.mcp.resource_utils import extract_title_from_uri
 
+if TYPE_CHECKING:
+    from pydantic import AnyUrl
+
 _logger = get_logger("multipart_converter_anthropic")
+
 # List of image MIME types supported by Anthropic API
 SUPPORTED_IMAGE_MIME_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp"}
 
@@ -41,59 +45,16 @@ class AnthropicConverter:
     """Converts MCP message types to Anthropic API format."""
 
     @staticmethod
-    def
-
-        documentMode: bool = True,
-    ) -> List[ContentBlockParam]:
-        """
-        Helper method to convert a list of content items to Anthropic format.
+    def _is_supported_image_type(mime_type: str) -> bool:
+        """Check if the given MIME type is supported by Anthropic's image API.
 
         Args:
-
-            documentMode: Whether to convert text resources to document blocks (True) or text blocks (False)
+            mime_type: The MIME type to check
 
         Returns:
-
+            True if the MIME type is supported, False otherwise
         """
-
-        anthropic_blocks: List[ContentBlockParam] = []
-
-        for content_item in content_items:
-            if isinstance(content_item, TextContent):
-                anthropic_block = AnthropicConverter._convert_text_content(content_item)
-                anthropic_blocks.append(anthropic_block)
-            elif isinstance(content_item, ImageContent):
-                # Check if image MIME type is supported
-                if content_item.mimeType not in SUPPORTED_IMAGE_MIME_TYPES:
-                    anthropic_block = AnthropicConverter._format_fail_message(
-                        content_item, content_item.mimeType
-                    )
-                else:
-                    anthropic_block = AnthropicConverter._convert_image_content(
-                        content_item
-                    )
-                anthropic_blocks.append(anthropic_block)
-            elif isinstance(content_item, EmbeddedResource):
-                anthropic_block = AnthropicConverter._convert_embedded_resource(
-                    content_item, documentMode
-                )
-                anthropic_blocks.append(anthropic_block)
-
-        return anthropic_blocks
-
-    @staticmethod
-    def _format_fail_message(
-        resource: Union[TextContent, ImageContent, EmbeddedResource], mimetype: str
-    ) -> TextBlockParam:
-        """Create a fallback text block for unsupported resource types"""
-        fallback_text: str = f"Unknown resource with format {mimetype}"
-        if resource.type == "image":
-            fallback_text = f"Image with unsupported format '{mimetype}' ({len(resource.data)} characters)"
-        if isinstance(resource, EmbeddedResource):
-            if isinstance(resource.resource, BlobResourceContents):
-                fallback_text = f"Embedded Resource {resource.resource.uri._url} with unsupported format {resource.resource.mimeType} ({len(resource.resource.blob)} characters)"
-
-        return TextBlockParam(type="text", text=fallback_text)
+        return mime_type in SUPPORTED_IMAGE_MIME_TYPES
 
     @staticmethod
     def convert_to_anthropic(multipart_msg: PromptMessageMultipart) -> MessageParam:
@@ -106,13 +67,14 @@ class AnthropicConverter:
         Returns:
             An Anthropic API MessageParam object
         """
-
-
+        role = multipart_msg.role
+
+        # Handle empty content case - create an empty list instead of a text block
+        if not multipart_msg.content:
+            return MessageParam(role=role, content=[])
 
         # Convert content blocks
-        anthropic_blocks
-            AnthropicConverter._convert_content_items(multipart_msg.content)
-        )
+        anthropic_blocks = AnthropicConverter._convert_content_items(multipart_msg.content, document_mode=True)
 
         # Filter blocks based on role (assistant can only have text blocks)
         if role == "assistant":
@@ -121,95 +83,119 @@ class AnthropicConverter:
                 if block.get("type") == "text":
                     text_blocks.append(block)
                 else:
-                    _logger.warning(
-                        f"Removing non-text block from assistant message: {block.get('type')}"
-                    )
+                    _logger.warning(f"Removing non-text block from assistant message: {block.get('type')}")
             anthropic_blocks = text_blocks
 
         # Create the Anthropic message
         return MessageParam(role=role, content=anthropic_blocks)
 
     @staticmethod
-    def
-        """
-
+    def convert_prompt_message_to_anthropic(message: PromptMessage) -> MessageParam:
+        """
+        Convert a standard PromptMessage to Anthropic API format.
 
-
-
-
-
-
-
-
-
-
+        Args:
+            message: The PromptMessage to convert
+
+        Returns:
+            An Anthropic API MessageParam object
+        """
+        # Convert the PromptMessage to a PromptMessageMultipart containing a single content item
+        multipart = PromptMessageMultipart(role=message.role, content=[message.content])
+
+        # Use the existing conversion method
+        return AnthropicConverter.convert_to_anthropic(multipart)
 
     @staticmethod
-    def
-
-
-
-
+    def _convert_content_items(
+        content_items: Sequence[Union[TextContent, ImageContent, EmbeddedResource]],
+        document_mode: bool = True,
+    ) -> List[ContentBlockParam]:
+        """
+        Convert a list of content items to Anthropic content blocks.
 
-
-
+        Args:
+            content_items: Sequence of MCP content items
+            document_mode: Whether to convert text resources to document blocks (True) or text blocks (False)
 
-
-
-
-
+        Returns:
+            List of Anthropic content blocks
+        """
+        anthropic_blocks: List[ContentBlockParam] = []
+
+        for content_item in content_items:
+            if isinstance(content_item, TextContent):
+                anthropic_blocks.append(TextBlockParam(type="text", text=content_item.text))
+
+            elif isinstance(content_item, ImageContent):
+                # Check if image MIME type is supported
+                if not AnthropicConverter._is_supported_image_type(content_item.mimeType):
+                    anthropic_blocks.append(
+                        TextBlockParam(
+                            type="text",
+                            text=f"Image with unsupported format '{content_item.mimeType}' ({len(content_item.data)} bytes)",
+                        )
+                    )
+                else:
+                    anthropic_blocks.append(
+                        ImageBlockParam(
+                            type="image",
+                            source=Base64ImageSourceParam(
+                                type="base64",
+                                media_type=content_item.mimeType,
+                                data=content_item.data,
+                            ),
+                        )
+                    )
+
+            elif isinstance(content_item, EmbeddedResource):
+                block = AnthropicConverter._convert_embedded_resource(content_item, document_mode)
+                anthropic_blocks.append(block)
+
+        return anthropic_blocks
 
     @staticmethod
     def _convert_embedded_resource(
         resource: EmbeddedResource,
-
+        document_mode: bool = True,
     ) -> ContentBlockParam:
-        """
+        """
+        Convert EmbeddedResource to appropriate Anthropic block type.
 
         Args:
             resource: The embedded resource to convert
-
+            document_mode: Whether to convert text resources to Document blocks (True) or Text blocks (False)
 
         Returns:
             An appropriate ContentBlockParam for the resource
         """
-        resource_content
-
-        )
-
-
+        resource_content = resource.resource
+        uri: Optional[AnyUrl] = getattr(resource_content, "uri", None)
+        is_url: bool = uri and uri.scheme in ("http", "https")
+
+        # Determine MIME type
         mime_type = AnthropicConverter._determine_mime_type(resource_content)
+
         # Extract title from URI
-        title = extract_title_from_uri(uri) if uri else
+        title = extract_title_from_uri(uri) if uri else "resource"
 
-        #
+        # Convert based on MIME type
        if mime_type == "image/svg+xml":
-
-
-
-
-
-
-        if is_image_mime_type(mime_type):
-            # Check if image MIME type is supported
-            if mime_type not in SUPPORTED_IMAGE_MIME_TYPES:
-                return AnthropicConverter._format_fail_message(resource, mime_type)
-
-            # Handle supported image types
+            return AnthropicConverter._convert_svg_resource(resource_content)
+
+        elif is_image_mime_type(mime_type):
+            if not AnthropicConverter._is_supported_image_type(mime_type):
+                return AnthropicConverter._create_fallback_text(f"Image with unsupported format '{mime_type}'", resource)
+
             if is_url:
-                return ImageBlockParam(
-                    type="image", source=URLImageSourceParam(type="url", url=str(uri))
-                )
+                return ImageBlockParam(type="image", source=URLImageSourceParam(type="url", url=str(uri)))
             elif hasattr(resource_content, "blob"):
                 return ImageBlockParam(
                     type="image",
-                    source=Base64ImageSourceParam(
-                        type="base64", media_type=mime_type, data=resource_content.blob
-                    ),
+                    source=Base64ImageSourceParam(type="base64", media_type=mime_type, data=resource_content.blob),
                 )
+            return AnthropicConverter._create_fallback_text("Image missing data", resource)
 
-        # Handle PDF resources
         elif mime_type == "application/pdf":
             if is_url:
                 return DocumentBlockParam(
@@ -227,34 +213,104 @@ class AnthropicConverter:
                         data=resource_content.blob,
                     ),
                 )
+            return TextBlockParam(type="text", text=f"[PDF resource missing data: {title}]")
 
-        # Handle text resources (default for all other text mime types)
         elif is_text_mime_type(mime_type):
-            if
-
-
-
-
-
-
-
-
-
-
-
-
+            if not hasattr(resource_content, "text"):
+                return TextBlockParam(
+                    type="text",
+                    text=f"[Text content could not be extracted from {title}]",
+                )
+
+            # Create document block when in document mode
+            if document_mode:
+                return DocumentBlockParam(
+                    type="document",
+                    title=title,
+                    source=PlainTextSourceParam(
+                        type="text",
+                        media_type="text/plain",
+                        data=resource_content.text,
+                    ),
+                )
+
+            # Return as simple text block when not in document mode
+            return TextBlockParam(type="text", text=resource_content.text)
 
         # Default fallback - convert to text if possible
         if hasattr(resource_content, "text"):
             return TextBlockParam(type="text", text=resource_content.text)
 
-
+        # This is for binary resources - match the format expected by the test
+        if isinstance(resource.resource, BlobResourceContents) and hasattr(resource.resource, "blob"):
+            blob_length = len(resource.resource.blob)
+            return TextBlockParam(
+                type="text",
+                text=f"Embedded Resource {uri._url} with unsupported format {mime_type} ({blob_length} characters)",
+            )
+
+        return AnthropicConverter._create_fallback_text(f"Unsupported resource ({mime_type})", resource)
+
+    @staticmethod
+    def _determine_mime_type(
+        resource: Union[TextResourceContents, BlobResourceContents],
+    ) -> str:
+        """
+        Determine the MIME type of a resource.
+
+        Args:
+            resource: The resource to check
+
+        Returns:
+            The MIME type as a string
+        """
+        if getattr(resource, "mimeType", None):
+            return resource.mimeType
+
+        if getattr(resource, "uri", None):
+            return guess_mime_type(resource.uri.serialize_url)
+
+        if hasattr(resource, "blob"):
+            return "application/octet-stream"
+
+        return "text/plain"
+
+    @staticmethod
+    def _convert_svg_resource(resource_content) -> TextBlockParam:
+        """
+        Convert SVG resource to text block with XML code formatting.
+
+        Args:
+            resource_content: The resource content containing SVG data
+
+        Returns:
+            A TextBlockParam with formatted SVG content
+        """
+        if hasattr(resource_content, "text"):
+            svg_content = resource_content.text
+            return TextBlockParam(type="text", text=f"```xml\n{svg_content}\n```")
+        return TextBlockParam(type="text", text="[SVG content could not be extracted]")
+
+    @staticmethod
+    def _create_fallback_text(message: str, resource: Union[TextContent, ImageContent, EmbeddedResource]) -> TextBlockParam:
+        """
+        Create a fallback text block for unsupported resource types.
+
+        Args:
+            message: The fallback message
+            resource: The resource that couldn't be converted
+
+        Returns:
+            A TextBlockParam with the fallback message
+        """
+        if isinstance(resource, EmbeddedResource) and hasattr(resource.resource, "uri"):
+            uri = resource.resource.uri
+            return TextBlockParam(type="text", text=f"[{message}: {uri._url}]")
+
+        return TextBlockParam(type="text", text=f"[{message}]")
 
     @staticmethod
-    def convert_tool_result_to_anthropic(
-        tool_result: CallToolResult, tool_use_id: str
-    ) -> ToolResultBlockParam:
+    def convert_tool_result_to_anthropic(tool_result: CallToolResult, tool_use_id: str) -> ToolResultBlockParam:
         """
         Convert an MCP CallToolResult to an Anthropic ToolResultBlockParam.
 
@@ -265,28 +321,22 @@ class AnthropicConverter:
         Returns:
             An Anthropic ToolResultBlockParam ready to be included in a user message
         """
-        # For tool results,
+        # For tool results, always use document_mode=False to get text blocks instead of document blocks
         anthropic_content = []
 
         for item in tool_result.content:
             if isinstance(item, EmbeddedResource):
                 # For embedded resources, always use text mode in tool results
-                resource_block = AnthropicConverter._convert_embedded_resource(
-                    item, documentMode=False
-                )
+                resource_block = AnthropicConverter._convert_embedded_resource(item, document_mode=False)
                 anthropic_content.append(resource_block)
-
-            # For
-                blocks = AnthropicConverter._convert_content_items(
-                    [item], documentMode=False
-                )
+            elif isinstance(item, (TextContent, ImageContent)):
+                # For text and image, use standard conversion
+                blocks = AnthropicConverter._convert_content_items([item], document_mode=False)
                 anthropic_content.extend(blocks)
 
         # If we ended up with no valid content blocks, create a placeholder
         if not anthropic_content:
-            anthropic_content = [
-                TextBlockParam(type="text", text="[No content in tool result]")
-            ]
+            anthropic_content = [TextBlockParam(type="text", text="[No content in tool result]")]
 
         # Create the tool result block
         return ToolResultBlockParam(
@@ -312,35 +362,47 @@ class AnthropicConverter:
         content_blocks = []
 
         for tool_use_id, result in tool_results:
-            #
-
+            # Process each tool result
+            tool_result_blocks = []
             separate_blocks = []
 
+            # Process each content item in the result
             for item in result.content:
-                # Text and images go in tool results, other resources (PDFs) go as separate blocks
                 if isinstance(item, (TextContent, ImageContent)):
-
+                    blocks = AnthropicConverter._convert_content_items([item], document_mode=False)
+                    tool_result_blocks.extend(blocks)
                 elif isinstance(item, EmbeddedResource):
-
-
-
+                    resource_content = item.resource
+
+                    # Text resources go in tool results, others go as separate blocks
+                    if isinstance(resource_content, TextResourceContents):
+                        block = AnthropicConverter._convert_embedded_resource(item, document_mode=False)
+                        tool_result_blocks.append(block)
                     else:
-                        # For binary resources like PDFs,
-                        block = AnthropicConverter._convert_embedded_resource(
-                            item, documentMode=True
-                        )
+                        # For binary resources like PDFs, add as separate block
+                        block = AnthropicConverter._convert_embedded_resource(item, document_mode=True)
                         separate_blocks.append(block)
-
-
-
-
-
-
-
-
-
+
+            # Create the tool result block if we have content
+            if tool_result_blocks:
+                content_blocks.append(
+                    ToolResultBlockParam(
+                        type="tool_result",
+                        tool_use_id=tool_use_id,
+                        content=tool_result_blocks,
+                        is_error=result.isError,
+                    )
+                )
+            else:
+                # If there's no content, still create a placeholder
+                content_blocks.append(
+                    ToolResultBlockParam(
+                        type="tool_result",
+                        tool_use_id=tool_use_id,
+                        content=[TextBlockParam(type="text", text="[No content in tool result]")],
+                        is_error=result.isError,
+                    )
                 )
-            )
 
         # Add separate blocks directly to the message
         content_blocks.extend(separate_blocks)