fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
Files changed (46)
  1. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +26 -4
  2. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +43 -22
  3. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +5 -11
  5. mcp_agent/core/agent_app.py +89 -13
  6. mcp_agent/core/fastagent.py +13 -3
  7. mcp_agent/core/mcp_content.py +222 -0
  8. mcp_agent/core/prompt.py +132 -0
  9. mcp_agent/core/proxies.py +41 -36
  10. mcp_agent/logging/transport.py +30 -3
  11. mcp_agent/mcp/mcp_aggregator.py +11 -10
  12. mcp_agent/mcp/mime_utils.py +69 -0
  13. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  14. mcp_agent/mcp/prompt_serialization.py +447 -0
  15. mcp_agent/mcp/prompts/__init__.py +0 -0
  16. mcp_agent/mcp/prompts/__main__.py +10 -0
  17. mcp_agent/mcp/prompts/prompt_server.py +508 -0
  18. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  19. mcp_agent/mcp/resource_utils.py +203 -0
  20. mcp_agent/resources/examples/internal/agent.py +1 -1
  21. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  22. mcp_agent/resources/examples/internal/sizer.py +0 -5
  23. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  24. mcp_agent/resources/examples/prompting/agent.py +23 -0
  25. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  26. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  27. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  28. mcp_agent/workflows/llm/augmented_llm.py +139 -66
  29. mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
  30. mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
  31. mcp_agent/workflows/llm/augmented_llm_passthrough.py +43 -0
  32. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  33. mcp_agent/workflows/llm/model_factory.py +20 -3
  34. mcp_agent/workflows/llm/openai_utils.py +65 -0
  35. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  36. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  37. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  38. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  39. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  40. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  41. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  42. mcp_agent/core/server_validation.py +0 -44
  43. mcp_agent/core/simulator_registry.py +0 -22
  44. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  45. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
  46. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/augmented_llm_playback.py
@@ -0,0 +1,109 @@
+ from typing import List, Optional, Union
+ from mcp import GetPromptResult
+ from mcp.types import PromptMessage
+ from mcp_agent.workflows.llm.augmented_llm import MessageParamT, RequestParams
+ from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
+
+
+ # TODO -- support tool calling
+ class PlaybackLLM(PassthroughLLM):
+     """
+     A specialized LLM implementation that plays back assistant messages when loaded with prompts.
+
+     Unlike the PassthroughLLM which simply passes through messages without modification,
+     PlaybackLLM is designed to simulate a conversation by playing back prompt messages
+     in sequence when loaded with prompts through apply_prompt_template.
+
+     After apply_prompt_template has been called, each call to generate_str returns the next
+     "ASSISTANT" message in the loaded messages. If no messages are set or all messages have
+     been played back, it returns a message indicating that messages are exhausted.
+     """
+
+     def __init__(self, name: str = "Playback", **kwargs):
+         super().__init__(name=name, **kwargs)
+         self._messages: List[PromptMessage] = []
+         self._current_index = 0
+
+     async def generate_str(
+         self,
+         message: Union[str, MessageParamT, List[MessageParamT]],
+         request_params: Optional[RequestParams] = None,
+     ) -> str:
+         """
+         Return the next ASSISTANT message in the loaded messages list.
+         If no messages are available or all have been played back,
+         returns a message indicating messages are exhausted.
+
+         Note: Only assistant messages are returned; user messages are skipped.
+         """
+         self.show_user_message(message, model="fastagent-playback", chat_turn=0)
+
+         if not self._messages or self._current_index >= len(self._messages):
+             size = len(self._messages) if self._messages else 0
+             response = f"MESSAGES EXHAUSTED (list size {size})"
+         else:
+             response = self._get_next_assistant_message()
+
+         await self.show_assistant_message(response, title="ASSISTANT/PLAYBACK")
+         return response
+
+     def _get_next_assistant_message(self) -> str:
+         """
+         Get the next assistant message from the loaded messages.
+         Increments the current message index and skips user messages.
+         """
+         # Find the next assistant message
+         while self._current_index < len(self._messages):
+             message = self._messages[self._current_index]
+             self._current_index += 1
+
+             # Skip non-assistant messages
+             if getattr(message, "role", None) != "assistant":
+                 continue
+
+             # Get content as string
+             content = message.content
+             if hasattr(content, "text"):
+                 return content.text
+             return str(content)
+
+         # If we get here, we've run out of assistant messages
+         return f"MESSAGES EXHAUSTED (list size {len(self._messages)})"
+
+     async def apply_prompt_template(
+         self, prompt_result: GetPromptResult, prompt_name: str
+     ) -> str:
+         """
+         Apply a prompt template by adding its messages to the playback queue.
+
+         Args:
+             prompt_result: The GetPromptResult containing prompt messages
+             prompt_name: The name of the prompt being applied
+
+         Returns:
+             String representation of the first message or an indication that no messages were added
+         """
+         prompt_messages: List[PromptMessage] = prompt_result.messages
+
+         # Extract arguments if they were stored in the result
+         arguments = getattr(prompt_result, "arguments", None)
+
+         # Display information about the loaded prompt
+         await self.show_prompt_loaded(
+             prompt_name=prompt_name,
+             description=prompt_result.description,
+             message_count=len(prompt_messages),
+             arguments=arguments,
+         )
+
+         # Add new messages to the end of the existing messages list
+         self._messages.extend(prompt_messages)
+
+         if not prompt_messages:
+             return "Prompt contains no messages"
+
+         # Reset the current index if this is the first time loading messages
+         if len(self._messages) == len(prompt_messages):
+             self._current_index = 0
+
+         return f"Added {len(prompt_messages)} messages to playback queue"
mcp_agent/workflows/llm/model_factory.py
@@ -8,9 +8,16 @@ from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLL
  from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
  from mcp_agent.workflows.llm.augmented_llm import RequestParams
  from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
+ from mcp_agent.workflows.llm.augmented_llm_playback import PlaybackLLM
+
 
  # Type alias for LLM classes
- LLMClass = Union[Type[AnthropicAugmentedLLM], Type[OpenAIAugmentedLLM]]
+ LLMClass = Union[
+     Type[AnthropicAugmentedLLM],
+     Type[OpenAIAugmentedLLM],
+     Type[PassthroughLLM],
+     Type[PlaybackLLM],
+ ]
 
 
  class Provider(Enum):
@@ -55,12 +62,13 @@ class ModelFactory:
          "high": ReasoningEffort.HIGH,
      }
 
-     # TODO -- add context window size information for display/mmanagement
+     # TODO -- add context window size information for display/management
      # TODO -- add audio support for gpt-4o-audio-preview
      # TODO -- bring model parameter configuration here
      # Mapping of model names to their default providers
      DEFAULT_PROVIDERS = {
          "passthrough": Provider.FAST_AGENT,
+         "playback": Provider.FAST_AGENT,
          "gpt-4o": Provider.OPENAI,
          "gpt-4o-mini": Provider.OPENAI,
          "o1-mini": Provider.OPENAI,
@@ -98,6 +106,12 @@ class ModelFactory:
          Provider.FAST_AGENT: PassthroughLLM,
      }
 
+     # Mapping of special model names to their specific LLM classes
+     # This overrides the provider-based class selection
+     MODEL_SPECIFIC_CLASSES: Dict[str, LLMClass] = {
+         "playback": PlaybackLLM,
+     }
+
      @classmethod
      def parse_model_string(cls, model_string: str) -> ModelConfig:
          """Parse a model string into a ModelConfig object"""
@@ -151,7 +165,10 @@ class ModelFactory:
          """
          # Parse configuration up front
          config = cls.parse_model_string(model_string)
-         llm_class = cls.PROVIDER_CLASSES[config.provider]
+         if config.model_name in cls.MODEL_SPECIFIC_CLASSES:
+             llm_class = cls.MODEL_SPECIFIC_CLASSES[config.model_name]
+         else:
+             llm_class = cls.PROVIDER_CLASSES[config.provider]
 
          # Create a factory function matching the attach_llm protocol
          def factory(agent: Agent, **kwargs) -> LLMClass:
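
Net effect of these hunks: a model name listed in MODEL_SPECIFIC_CLASSES bypasses the provider default, so "playback" resolves to PlaybackLLM while "passthrough" still falls through to PassthroughLLM via Provider.FAST_AGENT. A sketch of the selection order (resolve_llm_class is a hypothetical restatement, not part of the module):

    # Model-specific classes take precedence over the provider default.
    def resolve_llm_class(model_name: str, provider: "Provider") -> "LLMClass":
        if model_name in ModelFactory.MODEL_SPECIFIC_CLASSES:  # "playback" -> PlaybackLLM
            return ModelFactory.MODEL_SPECIFIC_CLASSES[model_name]
        return ModelFactory.PROVIDER_CLASSES[provider]  # e.g. FAST_AGENT -> PassthroughLLM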
mcp_agent/workflows/llm/openai_utils.py
@@ -0,0 +1,65 @@
+ """
+ Utility functions for OpenAI integration with MCP.
+
+ This file provides backward compatibility with the existing API while
+ delegating to the proper implementations in the providers/ directory.
+ """
+
+ from typing import Dict, Any, Union
+
+ from openai.types.chat import (
+     ChatCompletionMessage,
+     ChatCompletionMessageParam,
+ )
+
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+ from mcp_agent.workflows.llm.providers.multipart_converter_openai import OpenAIConverter
+ from mcp_agent.workflows.llm.providers.openai_multipart import (
+     openai_to_multipart,
+ )
+
+
+ def openai_message_to_prompt_message_multipart(
+     message: Union[ChatCompletionMessage, Dict[str, Any]],
+ ) -> PromptMessageMultipart:
+     """
+     Convert an OpenAI ChatCompletionMessage to a PromptMessageMultipart.
+
+     Args:
+         message: The OpenAI message to convert (can be an actual ChatCompletionMessage
+             or a dictionary with the same structure)
+
+     Returns:
+         A PromptMessageMultipart representation
+     """
+     return openai_to_multipart(message)
+
+
+ def openai_message_param_to_prompt_message_multipart(
+     message_param: ChatCompletionMessageParam,
+ ) -> PromptMessageMultipart:
+     """
+     Convert an OpenAI ChatCompletionMessageParam to a PromptMessageMultipart.
+
+     Args:
+         message_param: The OpenAI message param to convert
+
+     Returns:
+         A PromptMessageMultipart representation
+     """
+     return openai_to_multipart(message_param)
+
+
+ def prompt_message_multipart_to_openai_message_param(
+     multipart: PromptMessageMultipart,
+ ) -> ChatCompletionMessageParam:
+     """
+     Convert a PromptMessageMultipart to an OpenAI ChatCompletionMessageParam.
+
+     Args:
+         multipart: The PromptMessageMultipart to convert
+
+     Returns:
+         An OpenAI ChatCompletionMessageParam representation
+     """
+     return OpenAIConverter.convert_to_openai(multipart)
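
Since these helpers delegate to the providers/ implementations, a plain dict in ChatCompletionMessageParam shape should round-trip through the multipart representation. A minimal sketch (the content may be normalized on the way back, e.g. a string becoming a content-part list):

    from mcp_agent.workflows.llm.openai_utils import (
        openai_message_param_to_prompt_message_multipart,
        prompt_message_multipart_to_openai_message_param,
    )

    param = {"role": "user", "content": "describe this file"}
    multipart = openai_message_param_to_prompt_message_multipart(param)
    round_tripped = prompt_message_multipart_to_openai_message_param(multipart)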
mcp_agent/workflows/llm/providers/__init__.py
@@ -0,0 +1,8 @@
+ from mcp_agent.workflows.llm.providers.sampling_converter_anthropic import (
+     AnthropicSamplingConverter,
+ )
+ from mcp_agent.workflows.llm.providers.sampling_converter_openai import (
+     OpenAISamplingConverter,
+ )
+
+ __all__ = ["AnthropicSamplingConverter", "OpenAISamplingConverter"]
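
This re-export makes the two converters importable from the subpackage root:

    from mcp_agent.workflows.llm.providers import (
        AnthropicSamplingConverter,
        OpenAISamplingConverter,
    )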
mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py
@@ -0,0 +1,348 @@
+ from typing import List, Union, Sequence
+
+ from mcp.types import (
+     TextContent,
+     ImageContent,
+     EmbeddedResource,
+     CallToolResult,
+     TextResourceContents,
+     BlobResourceContents,
+ )
+ from pydantic import AnyUrl
+ from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+ from mcp_agent.mcp.mime_utils import (
+     guess_mime_type,
+     is_text_mime_type,
+     is_image_mime_type,
+ )
+
+ from anthropic.types import (
+     MessageParam,
+     TextBlockParam,
+     ImageBlockParam,
+     DocumentBlockParam,
+     Base64ImageSourceParam,
+     URLImageSourceParam,
+     Base64PDFSourceParam,
+     URLPDFSourceParam,
+     PlainTextSourceParam,
+     ToolResultBlockParam,
+     ContentBlockParam,
+ )
+ from mcp_agent.logging.logger import get_logger
+ from mcp_agent.mcp.resource_utils import extract_title_from_uri
+
+ _logger = get_logger("multipart_converter_anthropic")
+ # List of image MIME types supported by the Anthropic API
+ SUPPORTED_IMAGE_MIME_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp"}
+
+
+ class AnthropicConverter:
+     """Converts MCP message types to Anthropic API format."""
+
+     @staticmethod
+     def _convert_content_items(
+         content_items: Sequence[Union[TextContent, ImageContent, EmbeddedResource]],
+         documentMode: bool = True,
+     ) -> List[ContentBlockParam]:
+         """
+         Helper method to convert a list of content items to Anthropic format.
+
+         Args:
+             content_items: Sequence of MCP content items
+             documentMode: Whether to convert text resources to document blocks (True) or text blocks (False)
+
+         Returns:
+             List of Anthropic content blocks
+         """
+
+         anthropic_blocks: List[ContentBlockParam] = []
+
+         for content_item in content_items:
+             if isinstance(content_item, TextContent):
+                 anthropic_block = AnthropicConverter._convert_text_content(content_item)
+                 anthropic_blocks.append(anthropic_block)
+             elif isinstance(content_item, ImageContent):
+                 # Check if image MIME type is supported
+                 if content_item.mimeType not in SUPPORTED_IMAGE_MIME_TYPES:
+                     anthropic_block = AnthropicConverter._format_fail_message(
+                         content_item, content_item.mimeType
+                     )
+                 else:
+                     anthropic_block = AnthropicConverter._convert_image_content(
+                         content_item
+                     )
+                 anthropic_blocks.append(anthropic_block)
+             elif isinstance(content_item, EmbeddedResource):
+                 anthropic_block = AnthropicConverter._convert_embedded_resource(
+                     content_item, documentMode
+                 )
+                 anthropic_blocks.append(anthropic_block)
+
+         return anthropic_blocks
+
+     @staticmethod
+     def _format_fail_message(
+         resource: Union[TextContent, ImageContent, EmbeddedResource], mimetype: str
+     ) -> TextBlockParam:
+         """Create a fallback text block for unsupported resource types"""
+         fallback_text: str = f"Unknown resource with format {mimetype}"
+         if resource.type == "image":
+             fallback_text = f"Image with unsupported format '{mimetype}' ({len(resource.data)} characters)"
+         if isinstance(resource, EmbeddedResource):
+             if isinstance(resource.resource, BlobResourceContents):
+                 fallback_text = f"Embedded Resource {resource.resource.uri._url} with unsupported format {resource.resource.mimeType} ({len(resource.resource.blob)} characters)"
+
+         return TextBlockParam(type="text", text=fallback_text)
+
+     @staticmethod
+     def convert_to_anthropic(multipart_msg: PromptMessageMultipart) -> MessageParam:
+         """
+         Convert a PromptMessageMultipart message to Anthropic API format.
+
+         Args:
+             multipart_msg: The PromptMessageMultipart message to convert
+
+         Returns:
+             An Anthropic API MessageParam object
+         """
+         # Extract role
+         role: str = multipart_msg.role
+
+         # Convert content blocks
+         anthropic_blocks: List[ContentBlockParam] = (
+             AnthropicConverter._convert_content_items(multipart_msg.content)
+         )
+
+         # Filter blocks based on role (assistant can only have text blocks)
+         if role == "assistant":
+             text_blocks = []
+             for block in anthropic_blocks:
+                 if block.get("type") == "text":
+                     text_blocks.append(block)
+                 else:
+                     _logger.warning(
+                         f"Removing non-text block from assistant message: {block.get('type')}"
+                     )
+             anthropic_blocks = text_blocks
+
+         # Create the Anthropic message
+         return MessageParam(role=role, content=anthropic_blocks)
+
+     @staticmethod
+     def _convert_text_content(content: TextContent) -> TextBlockParam:
+         """Convert TextContent to Anthropic TextBlockParam."""
+         return TextBlockParam(type="text", text=content.text)
+
+     @staticmethod
+     def _convert_image_content(content: ImageContent) -> ImageBlockParam:
+         """Convert ImageContent to Anthropic ImageBlockParam."""
+         # MIME type validation already done in the main convert method
+         return ImageBlockParam(
+             type="image",
+             source=Base64ImageSourceParam(
+                 type="base64", media_type=content.mimeType, data=content.data
+             ),
+         )
+
+     @staticmethod
+     def _determine_mime_type(
+         resource: TextResourceContents | BlobResourceContents,
+     ) -> str:
+         if resource.mimeType:
+             return resource.mimeType
+
+         if resource.uri:
+             return guess_mime_type(resource.uri.serialize_url)
+
+         if resource.blob:
+             return "application/octet-stream"
+         else:
+             return "text/plain"
+
+     @staticmethod
+     def _convert_embedded_resource(
+         resource: EmbeddedResource,
+         documentMode: bool = True,
+     ) -> ContentBlockParam:
+         """Convert EmbeddedResource to the appropriate Anthropic block type.
+
+         Args:
+             resource: The embedded resource to convert
+             documentMode: Whether to convert text resources to Document blocks (True) or Text blocks (False)
+
+         Returns:
+             An appropriate ContentBlockParam for the resource
+         """
+         resource_content: TextResourceContents | BlobResourceContents = (
+             resource.resource
+         )
+         uri: AnyUrl = resource_content.uri
+         is_url: bool = uri.scheme in ("http", "https")
+         mime_type = AnthropicConverter._determine_mime_type(resource_content)
+         # Extract title from URI
+         title = extract_title_from_uri(uri) if uri else None
+
+         # Special case for SVG - it's actually text/XML, so extract as text
+         if mime_type == "image/svg+xml":
+             if hasattr(resource_content, "text"):
+                 # For SVG from a text resource
+                 svg_content = resource_content.text
+                 return TextBlockParam(type="text", text=f"```xml\n{svg_content}\n```")
+
+         # Handle image resources
+         if is_image_mime_type(mime_type):
+             # Check if image MIME type is supported
+             if mime_type not in SUPPORTED_IMAGE_MIME_TYPES:
+                 return AnthropicConverter._format_fail_message(resource, mime_type)
+
+             # Handle supported image types
+             if is_url:
+                 return ImageBlockParam(
+                     type="image", source=URLImageSourceParam(type="url", url=str(uri))
+                 )
+             elif hasattr(resource_content, "blob"):
+                 return ImageBlockParam(
+                     type="image",
+                     source=Base64ImageSourceParam(
+                         type="base64", media_type=mime_type, data=resource_content.blob
+                     ),
+                 )
+
+         # Handle PDF resources
+         elif mime_type == "application/pdf":
+             if is_url:
+                 return DocumentBlockParam(
+                     type="document",
+                     title=title,
+                     source=URLPDFSourceParam(type="url", url=str(uri)),
+                 )
+             elif hasattr(resource_content, "blob"):
+                 return DocumentBlockParam(
+                     type="document",
+                     title=title,
+                     source=Base64PDFSourceParam(
+                         type="base64",
+                         media_type="application/pdf",
+                         data=resource_content.blob,
+                     ),
+                 )
+
+         # Handle text resources (default for all other text MIME types)
+         elif is_text_mime_type(mime_type):
+             if documentMode:
+                 if hasattr(resource_content, "text"):
+                     return DocumentBlockParam(
+                         type="document",
+                         title=title,
+                         source=PlainTextSourceParam(
+                             type="text",
+                             media_type="text/plain",
+                             data=resource_content.text,
+                         ),
+                     )
+             # Return as a text block when documentMode is False
+             if hasattr(resource_content, "text"):
+                 return TextBlockParam(type="text", text=resource_content.text)
+
+         # Default fallback - convert to text if possible
+         if hasattr(resource_content, "text"):
+             return TextBlockParam(type="text", text=resource_content.text)
+
+         return AnthropicConverter._format_fail_message(resource, mime_type)
+
+     @staticmethod
+     def convert_tool_result_to_anthropic(
+         tool_result: CallToolResult, tool_use_id: str
+     ) -> ToolResultBlockParam:
+         """
+         Convert an MCP CallToolResult to an Anthropic ToolResultBlockParam.
+
+         Args:
+             tool_result: The tool result from a tool call
+             tool_use_id: The ID of the associated tool use
+
+         Returns:
+             An Anthropic ToolResultBlockParam ready to be included in a user message
+         """
+         # For tool results, we always use documentMode=False to get text blocks instead of document blocks
+         anthropic_content = []
+
+         for item in tool_result.content:
+             if isinstance(item, EmbeddedResource):
+                 # For embedded resources, always use text mode in tool results
+                 resource_block = AnthropicConverter._convert_embedded_resource(
+                     item, documentMode=False
+                 )
+                 anthropic_content.append(resource_block)
+             else:
+                 # For other types (Text, Image), use the standard conversion
+                 blocks = AnthropicConverter._convert_content_items(
+                     [item], documentMode=False
+                 )
+                 anthropic_content.extend(blocks)
+
+         # If we ended up with no valid content blocks, create a placeholder
+         if not anthropic_content:
+             anthropic_content = [
+                 TextBlockParam(type="text", text="[No content in tool result]")
+             ]
+
+         # Create the tool result block
+         return ToolResultBlockParam(
+             type="tool_result",
+             tool_use_id=tool_use_id,
+             content=anthropic_content,
+             is_error=tool_result.isError,
+         )
+
+     @staticmethod
+     def create_tool_results_message(
+         tool_results: List[tuple[str, CallToolResult]],
+     ) -> MessageParam:
+         """
+         Create a user message containing tool results.
+
+         Args:
+             tool_results: List of (tool_use_id, tool_result) tuples
+
+         Returns:
+             A MessageParam with role='user' containing all tool results
+         """
+         content_blocks = []
+
+         for tool_use_id, result in tool_results:
+             # Split into text/image content vs other content
+             tool_content = []
+             separate_blocks = []
+
+             for item in result.content:
+                 # Text and images go in tool results; other resources (PDFs) go as separate blocks
+                 if isinstance(item, (TextContent, ImageContent)):
+                     tool_content.append(item)
+                 elif isinstance(item, EmbeddedResource):
+                     # If it's a text resource, keep it in tool_content
+                     if isinstance(item.resource, TextResourceContents):
+                         tool_content.append(item)
+                     else:
+                         # For binary resources like PDFs, convert and add as a separate block
+                         block = AnthropicConverter._convert_embedded_resource(
+                             item, documentMode=True
+                         )
+                         separate_blocks.append(block)
+                 else:
+                     tool_content.append(item)
+
+             # Always create a tool result block, even if empty
+             # If tool_content is empty, a placeholder text block is added in convert_tool_result_to_anthropic
+             tool_result = CallToolResult(content=tool_content, isError=result.isError)
+             content_blocks.append(
+                 AnthropicConverter.convert_tool_result_to_anthropic(
+                     tool_result, tool_use_id
+                 )
+             )
+
+             # Add separate blocks directly to the message
+             content_blocks.extend(separate_blocks)
+
+         return MessageParam(role="user", content=content_blocks)
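
A rough usage sketch of the two public entry points (payloads invented for illustration; PromptMessageMultipart is assumed to accept a role and a content list as shown):

    from mcp.types import CallToolResult, TextContent
    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
    from mcp_agent.workflows.llm.providers.multipart_converter_anthropic import (
        AnthropicConverter,
    )

    # Multipart user message -> Anthropic MessageParam
    msg = PromptMessageMultipart(
        role="user",
        content=[TextContent(type="text", text="summarize the attached report")],
    )
    anthropic_msg = AnthropicConverter.convert_to_anthropic(msg)

    # Tool results -> a single user message; binary resources such as PDFs are
    # promoted to separate document blocks alongside the tool_result block.
    result = CallToolResult(content=[TextContent(type="text", text="42")])
    tool_msg = AnthropicConverter.create_tool_results_message([("toolu_123", result)])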