fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (46)
  1. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +26 -4
  2. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +43 -22
  3. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
  4. mcp_agent/agents/agent.py +5 -11
  5. mcp_agent/core/agent_app.py +89 -13
  6. mcp_agent/core/fastagent.py +13 -3
  7. mcp_agent/core/mcp_content.py +222 -0
  8. mcp_agent/core/prompt.py +132 -0
  9. mcp_agent/core/proxies.py +41 -36
  10. mcp_agent/logging/transport.py +30 -3
  11. mcp_agent/mcp/mcp_aggregator.py +11 -10
  12. mcp_agent/mcp/mime_utils.py +69 -0
  13. mcp_agent/mcp/prompt_message_multipart.py +64 -0
  14. mcp_agent/mcp/prompt_serialization.py +447 -0
  15. mcp_agent/mcp/prompts/__init__.py +0 -0
  16. mcp_agent/mcp/prompts/__main__.py +10 -0
  17. mcp_agent/mcp/prompts/prompt_server.py +508 -0
  18. mcp_agent/mcp/prompts/prompt_template.py +469 -0
  19. mcp_agent/mcp/resource_utils.py +203 -0
  20. mcp_agent/resources/examples/internal/agent.py +1 -1
  21. mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
  22. mcp_agent/resources/examples/internal/sizer.py +0 -5
  23. mcp_agent/resources/examples/prompting/__init__.py +3 -0
  24. mcp_agent/resources/examples/prompting/agent.py +23 -0
  25. mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
  26. mcp_agent/resources/examples/prompting/image_server.py +56 -0
  27. mcp_agent/workflows/llm/anthropic_utils.py +101 -0
  28. mcp_agent/workflows/llm/augmented_llm.py +139 -66
  29. mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
  30. mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
  31. mcp_agent/workflows/llm/augmented_llm_passthrough.py +43 -0
  32. mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
  33. mcp_agent/workflows/llm/model_factory.py +20 -3
  34. mcp_agent/workflows/llm/openai_utils.py +65 -0
  35. mcp_agent/workflows/llm/providers/__init__.py +8 -0
  36. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
  37. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
  38. mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
  39. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
  40. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
  41. mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
  42. mcp_agent/core/server_validation.py +0 -44
  43. mcp_agent/core/simulator_registry.py +0 -22
  44. mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
  45. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
  46. {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
mcp_agent/resources/examples/internal/sizer.py
@@ -12,11 +12,6 @@ fast = FastAgent("Sizer Prompt Test")
 )
 async def main():
     async with fast.run() as agent:
-        # await agent["sizer"].load_prompt("sizing_prompt_units", {"metric": "False"})
-        # print(await agent["sizer"].load_prompt("category-category_prompt"))
-        # await agent("What is the size of the moon?")
-        # await agent("What is the size of the Earth?")
-        # await agent("What is the size of the Sun?")
         await agent()
 
 
mcp_agent/resources/examples/prompting/__init__.py
@@ -0,0 +1,3 @@
+"""
+Prompting examples package for MCP Agent.
+"""
mcp_agent/resources/examples/prompting/agent.py
@@ -0,0 +1,23 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("FastAgent Example")
+
+
+# Define the agent
+@fast.agent(
+    "agent",
+    instruction="You are a helpful AI Agent",
+    servers=["prompts"],  # , "image", "hfspace"],
+    # model="gpt-4o",
+    # instruction="You are a helpful AI Agent", servers=["prompts","basic_memory"], model="haiku"
+)
+async def main():
+    # use the --model command line switch or agent arguments to change model
+    async with fast.run() as agent:
+        await agent()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
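The commented-out parameters above show how a model can be pinned per agent instead of relying on the default. A minimal sketch of that variant, assuming the "sonnet" alias listed in the configuration file below:

    @fast.agent(
        "agent",
        instruction="You are a helpful AI Agent",
        servers=["prompts"],
        model="sonnet",  # pin this agent to the Anthropic "sonnet" alias
    )
    async def main():
        async with fast.run() as agent:
            await agent()

At run time the same override is available through the --model=<model> command line switch documented in the configuration file.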
mcp_agent/resources/examples/prompting/fastagent.config.yaml
@@ -0,0 +1,44 @@
+# FastAgent Configuration File
+
+# Default Model Configuration:
+#
+# Takes format:
+#   <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low)
+# Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
+# and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini
+#
+# If not specified, defaults to "haiku".
+# Can be overridden with a command line switch --model=<model>, or within the Agent constructor.
+
+default_model: haiku
+
+# Logging and Console Configuration:
+logger:
+  # level: "debug" | "info" | "warning" | "error"
+  # type: "none" | "console" | "file" | "http"
+  # path: "/path/to/logfile.jsonl"
+  type: file
+  level: error
+  # Switch the progress display on or off
+  progress_display: true
+
+  # Show chat User/Assistant messages on the console
+  show_chat: true
+  # Show tool calls on the console
+  show_tools: true
+  # Truncate long tool responses on the console
+  truncate_tools: true
+
+# MCP Servers
+mcp:
+  servers:
+    prompts:
+      command: "prompt-server"
+      args: ["sizing.md", "resource.md", "resource-exe.md", "pdf_prompt.md"]
+    hfspace:
+      command: "npx"
+      args: ["@llmindset/mcp-hfspace"]
+    image:
+      command: "uv"
+      args: ["run", "image_server.py"]
+
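The model string format documented above is dot-delimited. As an illustration of how such a specifier decomposes, here is a hedged sketch: parse_model_spec is a hypothetical helper written for this example (the real parsing lives in mcp_agent/workflows/llm/model_factory.py, which this release touches but this diff does not show), and the low/medium/high effort values are an assumption extrapolated from the openai.o3-mini.low example:

    def parse_model_spec(spec: str) -> tuple[str, str, str | None]:
        """Split '<provider>.<model_string>.<reasoning_effort?>' into parts.

        Hypothetical helper for illustration only.
        """
        parts = spec.split(".")
        effort = None
        if parts[-1].lower() in ("low", "medium", "high"):  # assumed effort values
            effort = parts.pop().lower()
        return parts[0], ".".join(parts[1:]), effort

    # parse_model_spec("openai.o3-mini.low") -> ("openai", "o3-mini", "low")
    # parse_model_spec("anthropic.claude-3-5-sonnet-20241022")
    #     -> ("anthropic", "claude-3-5-sonnet-20241022", None)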
mcp_agent/resources/examples/prompting/image_server.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+"""
+Simple MCP server that responds to tool calls with text and image content.
+"""
+
+import logging
+from pathlib import Path
+
+from mcp.server.fastmcp import FastMCP, Context, Image
+from mcp.types import TextContent, ImageContent
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Create the FastMCP server
+app = FastMCP(name="ImageToolServer", debug=True)
+
+
+@app.tool(name="get_image", description="Returns an image with a descriptive text")
+async def get_image(
+    image_name: str = "default", ctx: Context = None
+) -> list[TextContent | ImageContent]:
+    """
+    Returns an image file along with a descriptive text.
+
+    Args:
+        image_name: Name of the image to return (default just returns image.jpg)
+
+    Returns:
+        A list containing a text message and the requested image
+    """
+    try:
+        # Read the image file and convert to base64
+        # Create the response with text and image
+        return [
+            TextContent(type="text", text="Here's your image:"),
+            Image(path="image.jpg").to_image_content(),
+        ]
+    except Exception as e:
+        logger.exception(f"Error processing image: {e}")
+        return [TextContent(type="text", text=f"Error processing image: {str(e)}")]
+
+
+if __name__ == "__main__":
+    # Check if the default image exists
+    if not Path("image.jpg").exists():
+        logger.warning(
+            "Default image file 'image.jpg' not found in the current directory"
+        )
+        logger.warning(
+            "Please add an image file named 'image.jpg' to the current directory"
+        )
+
+    # Run the server using stdio transport
+    app.run(transport="stdio")
mcp_agent/workflows/llm/anthropic_utils.py
@@ -0,0 +1,101 @@
+"""
+Utility functions for Anthropic integration with MCP.
+
+Provides conversion between Anthropic message formats and PromptMessageMultipart,
+leveraging existing code for resource handling and delimited formats.
+"""
+
+from anthropic.types import (
+    MessageParam,
+)
+
+from mcp.types import (
+    TextContent,
+    ImageContent,
+    EmbeddedResource,
+    TextResourceContents,
+)
+
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+
+# TODO -- only used for saving, but this will be driven directly from PromptMessages
+def anthropic_message_param_to_prompt_message_multipart(
+    message_param: MessageParam,
+) -> PromptMessageMultipart:
+    """
+    Convert an Anthropic MessageParam to a PromptMessageMultipart.
+
+    Args:
+        message_param: The Anthropic MessageParam to convert
+
+    Returns:
+        A PromptMessageMultipart representation
+    """
+    role = message_param["role"]
+    content = message_param["content"]
+
+    # Handle string content (user messages can be simple strings)
+    if isinstance(content, str):
+        return PromptMessageMultipart(
+            role=role, content=[TextContent(type="text", text=content)]
+        )
+
+    # Convert content blocks to MCP content types
+    mcp_contents = []
+
+    for block in content:
+        if isinstance(block, dict):
+            if block.get("type") == "text":
+                text = block.get("text", "")
+
+                # Check if this is a resource marker
+                if (
+                    text
+                    and (
+                        text.startswith("[Resource:")
+                        or text.startswith("[Binary Resource:")
+                    )
+                    and "\n" in text
+                ):
+                    header, content_text = text.split("\n", 1)
+                    if "MIME:" in header:
+                        mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
+                        if (
+                            mime_match != "text/plain"
+                        ):  # Only process non-plain text resources
+                            if (
+                                "Resource:" in header
+                                and "Binary Resource:" not in header
+                            ):
+                                uri = (
+                                    header.split("Resource:", 1)[1]
+                                    .split(",")[0]
+                                    .strip()
+                                )
+                                mcp_contents.append(
+                                    EmbeddedResource(
+                                        type="resource",
+                                        resource=TextResourceContents(
+                                            uri=uri,
+                                            mimeType=mime_match,
+                                            text=content_text,
+                                        ),
+                                    )
+                                )
+                                continue
+
+                # Regular text content
+                mcp_contents.append(TextContent(type="text", text=text))
+
+            elif block.get("type") == "image":
+                # Image content
+                source = block.get("source", {})
+                if isinstance(source, dict) and source.get("type") == "base64":
+                    media_type = source.get("media_type", "image/png")
+                    data = source.get("data", "")
+                    mcp_contents.append(
+                        ImageContent(type="image", data=data, mimeType=media_type)
+                    )
+
+    return PromptMessageMultipart(role=role, content=mcp_contents)
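For orientation, a short usage sketch of the converter above; the base64 payload is a placeholder, and the packages are assumed to be installed from this release:

    from mcp_agent.workflows.llm.anthropic_utils import (
        anthropic_message_param_to_prompt_message_multipart,
    )

    # An Anthropic-style MessageParam mixing text and image content blocks
    message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this image?"},
            {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": "image/jpeg",
                    "data": "<base64 bytes>",  # placeholder, not real image data
                },
            },
        ],
    }

    multipart = anthropic_message_param_to_prompt_message_multipart(message)
    # multipart.role == "user"; multipart.content now holds a TextContent
    # followed by an ImageContent carrying the base64 data and MIME type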
mcp_agent/workflows/llm/augmented_llm.py
@@ -10,15 +10,28 @@ from typing import (
     TYPE_CHECKING,
 )
 
+from mcp import CreateMessageResult, SamplingMessage
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.workflows.llm.sampling_format_converter import (
+    SamplingFormatConverter,
+    MessageParamT,
+    MessageT,
+)
+
+# Forward reference for type annotations
+if TYPE_CHECKING:
+    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+    from mcp_agent.agents.agent import Agent
+    from mcp_agent.context import Context
+
+
 from pydantic import Field
 
 from mcp.types import (
     CallToolRequest,
     CallToolResult,
     CreateMessageRequestParams,
-    CreateMessageResult,
     ModelPreferences,
-    SamplingMessage,
     PromptMessage,
     TextContent,
     GetPromptResult,
@@ -32,22 +45,10 @@ from mcp_agent.workflows.llm.llm_selector import ModelSelector
 from mcp_agent.ui.console_display import ConsoleDisplay
 from rich.text import Text
 
-if TYPE_CHECKING:
-    from mcp_agent.agents.agent import Agent
-    from mcp_agent.context import Context
-
-MessageParamT = TypeVar("MessageParamT")
-"""A type representing an input message to an LLM."""
-
-MessageT = TypeVar("MessageT")
-"""A type representing an output message from an LLM."""
 
 ModelT = TypeVar("ModelT")
 """A type representing a structured output message from an LLM."""
 
-# TODO: saqadri - SamplingMessage is fairly limiting - consider extending
-MCPMessageParam = SamplingMessage
-MCPMessageResult = CreateMessageResult
 
 # TODO -- move this to a constant
 HUMAN_INPUT_TOOL_NAME = "__human_input__"
@@ -216,25 +217,10 @@ class AugmentedLLMProtocol(Protocol, Generic[MessageParamT, MessageT]):
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
 
-
-class ProviderToMCPConverter(Protocol, Generic[MessageParamT, MessageT]):
-    """Conversions between LLM provider and MCP types"""
-
-    @classmethod
-    def to_mcp_message_result(cls, result: MessageT) -> MCPMessageResult:
-        """Convert an LLM response to an MCP message result type."""
-
-    @classmethod
-    def from_mcp_message_result(cls, result: MCPMessageResult) -> MessageT:
-        """Convert an MCP message result to an LLM response type."""
-
-    @classmethod
-    def to_mcp_message_param(cls, param: MessageParamT) -> MCPMessageParam:
-        """Convert an LLM input to an MCP message (SamplingMessage) type."""
-
-    @classmethod
-    def from_mcp_message_param(cls, param: MCPMessageParam) -> MessageParamT:
-        """Convert an MCP message (SamplingMessage) to an LLM input type."""
+    async def generate_prompt(
+        self, prompt: PromptMessageMultipart, request_params: RequestParams | None
+    ) -> str:
+        """Request an LLM generation and return a string representation of the result"""
 
 
 class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
@@ -257,7 +243,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         instruction: str | None = None,
         name: str | None = None,
         request_params: RequestParams | None = None,
-        type_converter: Type[ProviderToMCPConverter[MessageParamT, MessageT]] = None,
+        type_converter: Type[SamplingFormatConverter[MessageParamT, MessageT]] = None,
         context: Optional["Context"] = None,
         **kwargs,
     ):
@@ -335,6 +321,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
     ) -> ModelT:
         """Request a structured LLM generation and return the result as a Pydantic model."""
 
+    # async def generate2_str(self, prompt: PromptMessageMultipart, request_params: RequestParams | None = None) -> List[MessageT]:
+    #     """Request an LLM generation, which may run multiple iterations, and return the result"""
+    #     return None
+
     async def select_model(
         self, request_params: RequestParams | None = None
     ) -> str | None:
@@ -379,10 +369,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         merged.update(provided_params.model_dump(exclude_unset=True))
         final_params = RequestParams(**merged)
 
-        # self.logger.debug(
-        #     "Final merged params:", extra={"params": final_params.model_dump()}
-        # )
-
         return final_params
 
     def get_request_params(
@@ -409,24 +395,24 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
 
         return default_request_params
 
-    def to_mcp_message_result(self, result: MessageT) -> MCPMessageResult:
+    def to_mcp_message_result(self, result: MessageT) -> CreateMessageResult:
         """Convert an LLM response to an MCP message result type."""
-        return self.type_converter.to_mcp_message_result(result)
+        return self.type_converter.to_sampling_result(result)
 
-    def from_mcp_message_result(self, result: MCPMessageResult) -> MessageT:
+    def from_mcp_message_result(self, result: CreateMessageResult) -> MessageT:
         """Convert an MCP message result to an LLM response type."""
-        return self.type_converter.from_mcp_message_result(result)
+        return self.type_converter.from_sampling_result(result)
 
-    def to_mcp_message_param(self, param: MessageParamT) -> MCPMessageParam:
+    def to_mcp_message_param(self, param: MessageParamT) -> SamplingMessage:
         """Convert an LLM input to an MCP message (SamplingMessage) type."""
-        return self.type_converter.to_mcp_message_param(param)
+        return self.type_converter.to_sampling_message(param)
 
-    def from_mcp_message_param(self, param: MCPMessageParam) -> MessageParamT:
+    def from_mcp_message_param(self, param: SamplingMessage) -> MessageParamT:
         """Convert an MCP message (SamplingMessage) to an LLM input type."""
-        return self.type_converter.from_mcp_message_param(param)
+        return self.type_converter.from_sampling_message(param)
 
     def from_mcp_prompt_message(self, message: PromptMessage) -> MessageParamT:
-        return self.type_converter.from_mcp_prompt_message(message)
+        return self.type_converter.from_prompt_message(message)
 
     @classmethod
     def convert_message_to_message_param(
@@ -680,10 +666,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
             String representation of the assistant's response if generated,
             or the last assistant message in the prompt
         """
-        prompt_messages: List[PromptMessage] = prompt_result.messages
+        from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
         # Check if we have any messages
-        if not prompt_messages:
+        if not prompt_result.messages:
             return "Prompt contains no messages"
 
         # Extract arguments if they were stored in the result
@@ -693,12 +679,36 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         await self.show_prompt_loaded(
             prompt_name=prompt_name,
             description=prompt_result.description,
-            message_count=len(prompt_messages),
+            message_count=len(prompt_result.messages),
             arguments=arguments,
         )
 
+        # Convert to PromptMessageMultipart objects
+        multipart_messages = PromptMessageMultipart.parse_get_prompt_result(
+            prompt_result
+        )
+
+        # Delegate to the provider-specific implementation
+        return await self._apply_prompt_template_provider_specific(multipart_messages)
+
+    async def _apply_prompt_template_provider_specific(
+        self, multipart_messages: List["PromptMessageMultipart"]
+    ) -> str:
+        """
+        Provider-specific implementation of apply_prompt_template.
+        This default implementation handles basic text content for any LLM type.
+        Provider-specific subclasses should override this method to handle
+        multimodal content appropriately.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
         # Check the last message role
-        last_message = prompt_messages[-1]
+        last_message = multipart_messages[-1]
 
         if last_message.role == "user":
             # For user messages: Add all previous messages to history, then generate response to the last one
@@ -707,20 +717,37 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
             )
 
             # Add all but the last message to history
-            if len(prompt_messages) > 1:
-                previous_messages = prompt_messages[:-1]
+            if len(multipart_messages) > 1:
+                previous_messages = multipart_messages[:-1]
                 converted = []
+
+                # Fallback generic method for all LLM types
                 for msg in previous_messages:
-                    converted.append(self.type_converter.from_mcp_prompt_message(msg))
+                    # Convert each PromptMessageMultipart to individual PromptMessages
+                    prompt_messages = msg.to_prompt_messages()
+                    for prompt_msg in prompt_messages:
+                        converted.append(
+                            self.type_converter.from_prompt_message(prompt_msg)
+                        )
+
                 self.history.extend(converted, is_prompt=True)
 
-            # Extract the user's question and generate a response
-            user_content = last_message.content
-            user_text = (
-                user_content.text
-                if hasattr(user_content, "text")
-                else str(user_content)
-            )
+            # For generic LLMs, extract text and describe non-text content
+            user_text_parts = []
+            for content in last_message.content:
+                if content.type == "text":
+                    user_text_parts.append(content.text)
+                elif content.type == "resource" and hasattr(content.resource, "text"):
+                    user_text_parts.append(content.resource.text)
+                elif content.type == "image":
+                    # Add a placeholder for images
+                    mime_type = getattr(content, "mimeType", "image/unknown")
+                    user_text_parts.append(f"[Image: {mime_type}]")
+
+            user_text = "\n".join(user_text_parts) if user_text_parts else ""
+            if not user_text:
+                # Fallback to original method if we couldn't extract text
+                user_text = str(last_message.content)
 
             return await self.generate_str(user_text)
         else:
@@ -731,10 +758,56 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
 
             # Convert and add all messages to history
             converted = []
-            for msg in prompt_messages:
-                converted.append(self.type_converter.from_mcp_prompt_message(msg))
+
+            # Fallback to the original method for all LLM types
+            for msg in multipart_messages:
+                # Convert each PromptMessageMultipart to individual PromptMessages
+                prompt_messages = msg.to_prompt_messages()
+                for prompt_msg in prompt_messages:
+                    converted.append(
+                        self.type_converter.from_prompt_message(prompt_msg)
+                    )
+
             self.history.extend(converted, is_prompt=True)
 
-            # Return the assistant's message
-            content = last_message.content
-            return content.text if hasattr(content, "text") else str(content)
+            # Return the assistant's message with proper handling of different content types
+            assistant_text_parts = []
+            has_non_text_content = False
+
+            for content in last_message.content:
+                if content.type == "text":
+                    assistant_text_parts.append(content.text)
+                elif content.type == "resource" and hasattr(content.resource, "text"):
+                    # Add resource text with metadata
+                    mime_type = getattr(content.resource, "mimeType", "text/plain")
+                    uri = getattr(content.resource, "uri", "")
+                    if uri:
+                        assistant_text_parts.append(
+                            f"[Resource: {uri}, Type: {mime_type}]\n{content.resource.text}"
+                        )
+                    else:
+                        assistant_text_parts.append(
+                            f"[Resource Type: {mime_type}]\n{content.resource.text}"
+                        )
+                elif content.type == "image":
+                    # Note the presence of images
+                    mime_type = getattr(content, "mimeType", "image/unknown")
+                    assistant_text_parts.append(f"[Image: {mime_type}]")
+                    has_non_text_content = True
+                else:
+                    # Other content types
+                    assistant_text_parts.append(f"[Content of type: {content.type}]")
+                    has_non_text_content = True
+
+            # Join all parts with double newlines for better readability
+            result = (
+                "\n\n".join(assistant_text_parts)
+                if assistant_text_parts
+                else str(last_message.content)
+            )
+
+            # Add a note if non-text content was present
+            if has_non_text_content:
+                result += "\n\n[Note: This message contained non-text content that may not be fully represented in text format]"
+
+            return result
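The removed ProviderToMCPConverter protocol is superseded by SamplingFormatConverter, added in mcp_agent/workflows/llm/sampling_format_converter.py (+39 lines, not shown in this diff). Judging from the call sites above (to_sampling_result, from_sampling_result, to_sampling_message, from_sampling_message, from_prompt_message), its shape is roughly the following; this is a sketch reconstructed from usage, not the file's actual contents:

    from typing import Generic, Protocol, TypeVar

    from mcp import CreateMessageResult, SamplingMessage
    from mcp.types import PromptMessage

    MessageParamT = TypeVar("MessageParamT")  # an input message to an LLM
    MessageT = TypeVar("MessageT")  # an output message from an LLM


    class SamplingFormatConverter(Protocol, Generic[MessageParamT, MessageT]):
        """Conversions between LLM provider and MCP sampling types (inferred)."""

        @classmethod
        def to_sampling_result(cls, result: MessageT) -> CreateMessageResult: ...

        @classmethod
        def from_sampling_result(cls, result: CreateMessageResult) -> MessageT: ...

        @classmethod
        def to_sampling_message(cls, param: MessageParamT) -> SamplingMessage: ...

        @classmethod
        def from_sampling_message(cls, param: SamplingMessage) -> MessageParamT: ...

        @classmethod
        def from_prompt_message(cls, message: PromptMessage) -> MessageParamT: ...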