fast-agent-mcp 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/METADATA +5 -1
  2. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/RECORD +28 -17
  3. mcp_agent/agents/agent.py +46 -0
  4. mcp_agent/core/agent_app.py +373 -9
  5. mcp_agent/core/decorators.py +455 -0
  6. mcp_agent/core/enhanced_prompt.py +70 -4
  7. mcp_agent/core/factory.py +501 -0
  8. mcp_agent/core/fastagent.py +140 -1059
  9. mcp_agent/core/proxies.py +51 -11
  10. mcp_agent/core/validation.py +221 -0
  11. mcp_agent/human_input/handler.py +5 -2
  12. mcp_agent/mcp/mcp_aggregator.py +537 -47
  13. mcp_agent/mcp/mcp_connection_manager.py +13 -2
  14. mcp_agent/mcp_server/__init__.py +4 -0
  15. mcp_agent/mcp_server/agent_server.py +121 -0
  16. mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
  17. mcp_agent/resources/examples/internal/prompt_category.py +21 -0
  18. mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
  19. mcp_agent/resources/examples/internal/sizer.py +24 -0
  20. mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
  21. mcp_agent/resources/examples/workflows/sse.py +23 -0
  22. mcp_agent/ui/console_display.py +278 -0
  23. mcp_agent/workflows/llm/augmented_llm.py +245 -179
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
  26. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/WHEEL +0 -0
  27. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/entry_points.txt +0 -0
  28. {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.5.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/llm/augmented_llm.py
@@ -21,17 +21,18 @@ from mcp.types import (
     CreateMessageResult,
     ModelPreferences,
     SamplingMessage,
+    PromptMessage,
     TextContent,
+    GetPromptResult,
 )

 from mcp_agent.context_dependent import ContextDependent
 from mcp_agent.core.exceptions import PromptExitError
 from mcp_agent.event_progress import ProgressAction
-from mcp_agent.mcp.mcp_aggregator import MCPAggregator, SEP
+from mcp_agent.mcp.mcp_aggregator import MCPAggregator
 from mcp_agent.workflows.llm.llm_selector import ModelSelector
-from rich.panel import Panel
+from mcp_agent.ui.console_display import ConsoleDisplay
 from rich.text import Text
-from mcp_agent import console

 if TYPE_CHECKING:
     from mcp_agent.agents.agent import Agent
@@ -63,39 +64,96 @@ class Memory(Protocol, Generic[MessageParamT]):

     def __init__(self): ...

-    def extend(self, messages: List[MessageParamT]) -> None: ...
+    def extend(
+        self, messages: List[MessageParamT], is_prompt: bool = False
+    ) -> None: ...

-    def set(self, messages: List[MessageParamT]) -> None: ...
+    def set(self, messages: List[MessageParamT], is_prompt: bool = False) -> None: ...

-    def append(self, message: MessageParamT) -> None: ...
+    def append(self, message: MessageParamT, is_prompt: bool = False) -> None: ...

-    def get(self) -> List[MessageParamT]: ...
+    def get(self, include_history: bool = True) -> List[MessageParamT]: ...

-    def clear(self) -> None: ...
+    def clear(self, clear_prompts: bool = False) -> None: ...


 class SimpleMemory(Memory, Generic[MessageParamT]):
     """
     Simple memory management for storing past interactions in-memory.
+
+    Maintains both prompt messages (which are always included) and
+    generated conversation history (which is included based on use_history setting).
     """

     def __init__(self):
         self.history: List[MessageParamT] = []
+        self.prompt_messages: List[MessageParamT] = []  # Always included
+
+    def extend(self, messages: List[MessageParamT], is_prompt: bool = False):
+        """
+        Add multiple messages to history.

-    def extend(self, messages: List[MessageParamT]):
-        self.history.extend(messages)
+        Args:
+            messages: Messages to add
+            is_prompt: If True, add to prompt_messages instead of regular history
+        """
+        if is_prompt:
+            self.prompt_messages.extend(messages)
+        else:
+            self.history.extend(messages)

-    def set(self, messages: List[MessageParamT]):
-        self.history = messages.copy()
+    def set(self, messages: List[MessageParamT], is_prompt: bool = False):
+        """
+        Replace messages in history.
+
+        Args:
+            messages: Messages to set
+            is_prompt: If True, replace prompt_messages instead of regular history
+        """
+        if is_prompt:
+            self.prompt_messages = messages.copy()
+        else:
+            self.history = messages.copy()
+
+    def append(self, message: MessageParamT, is_prompt: bool = False):
+        """
+        Add a single message to history.
+
+        Args:
+            message: Message to add
+            is_prompt: If True, add to prompt_messages instead of regular history
+        """
+        if is_prompt:
+            self.prompt_messages.append(message)
+        else:
+            self.history.append(message)
+
+    def get(self, include_history: bool = True) -> List[MessageParamT]:
+        """
+        Get all messages in memory.
+
+        Args:
+            include_history: If True, include regular history messages
+                If False, only return prompt messages

-    def append(self, message: MessageParamT):
-        self.history.append(message)
+        Returns:
+            Combined list of prompt messages and optionally history messages
+        """
+        if include_history:
+            return self.prompt_messages + self.history
+        else:
+            return self.prompt_messages.copy()

-    def get(self) -> List[MessageParamT]:
-        return self.history
+    def clear(self, clear_prompts: bool = False):
+        """
+        Clear history and optionally prompt messages.

-    def clear(self):
+        Args:
+            clear_prompts: If True, also clear prompt messages
+        """
         self.history = []
+        if clear_prompts:
+            self.prompt_messages = []


 class RequestParams(CreateMessageRequestParams):
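
For orientation, a minimal sketch of how the reworked SimpleMemory behaves (illustrative only; plain dicts stand in for MessageParamT values):

    memory = SimpleMemory()
    # Prompt-template messages are pinned separately from conversation turns
    memory.extend([{"role": "user", "content": "You are a helpful assistant."}], is_prompt=True)
    memory.append({"role": "user", "content": "Hello"})   # ordinary conversation turn
    memory.get()                          # pinned prompt messages + conversation history
    memory.get(include_history=False)     # pinned prompt messages only
    memory.clear()                        # clears conversation turns, keeps prompts
    memory.clear(clear_prompts=True)      # clears everything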
@@ -224,6 +282,9 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         )
         self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()

+        # Initialize the display component
+        self.display = ConsoleDisplay(config=self.context.config)
+
         # Set initial model preferences
         self.model_preferences = ModelPreferences(
             costPriority=0.3,
@@ -315,14 +376,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         self, default_params: RequestParams, provided_params: RequestParams
     ) -> RequestParams:
         """Merge default and provided request parameters"""
-        # Log parameter merging if debug logging is enabled
-        # self.context.config.logger.debug(
-        #     "Merging provided request params with defaults",
-        #     extra={
-        #         "defaults": default_params.model_dump(),
-        #         "provided": provided_params.model_dump(),
-        #     },
-        # )

         merged = default_params.model_dump()
         merged.update(provided_params.model_dump(exclude_unset=True))
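
The load-bearing detail in the merge is exclude_unset=True: only the fields the caller explicitly set override the defaults. A toy pydantic sketch of that semantics (Settings is a stand-in model, not part of this package):

    from pydantic import BaseModel

    class Settings(BaseModel):
        use_history: bool = True
        max_tokens: int = 4096

    defaults = Settings()
    provided = Settings(max_tokens=8192)                     # only max_tokens explicitly set
    merged = defaults.model_dump()
    merged.update(provided.model_dump(exclude_unset=True))   # overlays just {"max_tokens": 8192}
    Settings(**merged)                                       # use_history=True, max_tokens=8192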
@@ -374,6 +427,9 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         """Convert an MCP message (SamplingMessage) to an LLM input type."""
         return self.type_converter.from_mcp_message_param(param)

+    def from_mcp_prompt_message(self, message: PromptMessage) -> MessageParamT:
+        return self.type_converter.from_mcp_prompt_message(message)
+
     @classmethod
     def convert_message_to_message_param(
         cls, message: MessageT, **kwargs
@@ -397,113 +453,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message

     def show_tool_result(self, result: CallToolResult):
         """Display a tool result in a formatted panel."""
-
-        if not self.context.config.logger.show_tools:
-            return
-
-        if result.isError:
-            style = "red"
-        else:
-            style = "magenta"
-
-        panel = Panel(
-            Text(
-                str(result.content), overflow="..."
-            ),  # TODO support multi-model/multi-part responses
-            title="[TOOL RESULT]",
-            title_align="right",
-            style=style,
-            border_style="bold white",
-            padding=(1, 2),
-        )
-
-        if self.context.config.logger.truncate_tools:
-            if len(str(result.content)) > 360:
-                panel.height = 8
-
-        console.console.print(panel)
-        console.console.print("\n")
+        self.display.show_tool_result(result)

     def show_oai_tool_result(self, result):
         """Display a tool result in a formatted panel."""
-
-        if not self.context.config.logger.show_tools:
-            return
-
-        panel = Panel(
-            Text(str(result), overflow="..."),  # TODO update openai support
-            title="[TOOL RESULT]",
-            title_align="right",
-            style="magenta",
-            border_style="bold white",
-            padding=(1, 2),
-        )
-
-        if self.context.config.logger.truncate_tools:
-            if len(str(result)) > 360:
-                panel.height = 8
-
-        console.console.print(panel)
-        console.console.print("\n")
+        self.display.show_oai_tool_result(result)

     def show_tool_call(self, available_tools, tool_name, tool_args):
         """Display a tool call in a formatted panel."""
-
-        if not self.context.config.logger.show_tools:
-            return
-
-        display_tool_list = Text()
-        for display_tool in available_tools:
-            # Handle both OpenAI and Anthropic tool formats
-            # TODO -- this should really be using the ToolCall abstraction and converting at the concrete layer??
-            if isinstance(display_tool, dict):
-                if "function" in display_tool:
-                    # OpenAI format
-                    tool_call_name = display_tool["function"]["name"]
-                else:
-                    # Anthropic format
-                    tool_call_name = display_tool["name"]
-            else:
-                # Handle potential object format (e.g., Pydantic models)
-                tool_call_name = (
-                    display_tool.function.name
-                    if hasattr(display_tool, "function")
-                    else display_tool.name
-                )
-
-            parts = (
-                tool_call_name.split(SEP)
-                if SEP in tool_call_name
-                else [tool_call_name, tool_call_name]
-            )
-            if tool_name.split(SEP)[0] == parts[0]:
-                if tool_call_name == tool_name:
-                    style = "magenta"
-                else:
-                    style = "dim white"
-
-                shortened_name = (
-                    parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
-                )
-                display_tool_list.append(f"[{shortened_name}] ", style)
-
-        panel = Panel(
-            Text(str(tool_args), overflow="ellipsis"),
-            title="[TOOL CALL]",
-            title_align="left",
-            style="magenta",
-            border_style="bold white",
-            subtitle=display_tool_list,
-            subtitle_align="left",
-            padding=(1, 2),
-        )
-
-        if self.context.config.logger.truncate_tools:
-            if len(str(tool_args)) > 360:
-                panel.height = 8
-
-        console.console.print(panel)
-        console.console.print("\n")
+        self.display.show_tool_call(available_tools, tool_name, tool_args)

     async def show_assistant_message(
         self,
@@ -512,62 +470,17 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         title: str = "ASSISTANT",
     ):
         """Display an assistant message in a formatted panel."""
-
-        if not self.context.config.logger.show_chat:
-            return
-
-        mcp_server_name = (
-            highlight_namespaced_tool.split(SEP)
-            if SEP in highlight_namespaced_tool
-            else [highlight_namespaced_tool]
-        )
-
-        display_server_list = Text()
-
-        tools = await self.aggregator.list_tools()
-        if any(tool.name == HUMAN_INPUT_TOOL_NAME for tool in tools.tools):
-            style = (
-                "green"
-                if highlight_namespaced_tool == HUMAN_INPUT_TOOL_NAME
-                else "dim white"
-            )
-            display_server_list.append("[human] ", style)
-
-        for server_name in await self.aggregator.list_servers():
-            style = "green" if server_name == mcp_server_name[0] else "dim white"
-            display_server_list.append(f"[{server_name}] ", style)
-
-        panel = Panel(
+        await self.display.show_assistant_message(
             message_text,
-            title=f"[{title}]{f' ({self.name})' if self.name else ''}",
-            title_align="left",
-            style="green",
-            border_style="bold white",
-            padding=(1, 2),
-            subtitle=display_server_list,
-            subtitle_align="left",
+            aggregator=self.aggregator,
+            highlight_namespaced_tool=highlight_namespaced_tool,
+            title=title,
+            name=self.name,
         )
-        console.console.print(panel)
-        console.console.print("\n")

     def show_user_message(self, message, model: str | None, chat_turn: int):
         """Display a user message in a formatted panel."""
-
-        if not self.context.config.logger.show_chat:
-            return
-
-        panel = Panel(
-            message,
-            title=f"{f'({self.name}) [USER]' if self.name else '[USER]'}",
-            title_align="right",
-            style="blue",
-            border_style="bold white",
-            padding=(1, 2),
-            subtitle=Text(f"{model} turn {chat_turn}", style="dim white"),
-            subtitle_align="left",
-        )
-        console.console.print(panel)
-        console.console.print("\n")
+        self.display.show_user_message(message, model, chat_turn, name=self.name)

     async def pre_tool_call(
         self, tool_call_id: str | None, request: CallToolRequest
@@ -633,11 +546,62 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         )

     def message_param_str(self, message: MessageParamT) -> str:
-        """Convert an input message to a string representation."""
+        """
+        Convert an input message to a string representation.
+        Tries to extract just the content when possible.
+        """
+        if isinstance(message, dict):
+            # For dictionary format messages
+            if "content" in message:
+                content = message["content"]
+                # Handle both string and structured content formats
+                if isinstance(content, str):
+                    return content
+                elif isinstance(content, list) and content:
+                    # Try to extract text from content parts
+                    text_parts = []
+                    for part in content:
+                        if isinstance(part, dict) and "text" in part:
+                            text_parts.append(part["text"])
+                        elif hasattr(part, "text"):
+                            text_parts.append(part.text)
+                    if text_parts:
+                        return "\n".join(text_parts)
+
+        # For objects with content attribute
+        if hasattr(message, "content"):
+            content = message.content
+            if isinstance(content, str):
+                return content
+            elif hasattr(content, "text"):
+                return content.text
+
+        # Default fallback
         return str(message)

     def message_str(self, message: MessageT) -> str:
-        """Convert an output message to a string representation."""
+        """
+        Convert an output message to a string representation.
+        Tries to extract just the content when possible.
+        """
+        # First try to use the same method for consistency
+        result = self.message_param_str(message)
+        if result != str(message):
+            return result
+
+        # Additional handling for output-specific formats
+        if hasattr(message, "content"):
+            content = message.content
+            if isinstance(content, list):
+                # Extract text from content blocks
+                text_parts = []
+                for block in content:
+                    if hasattr(block, "text") and block.text:
+                        text_parts.append(block.text)
+                if text_parts:
+                    return "\n".join(text_parts)
+
+        # Default fallback
         return str(message)

     def _log_chat_progress(
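
A rough illustration of what the new extraction logic returns (assuming an AugmentedLLM subclass instance llm; the messages are hypothetical):

    llm.message_param_str({"role": "user", "content": "hello"})
    # -> "hello" (string content is returned directly)
    llm.message_param_str({"role": "user", "content": [{"type": "text", "text": "a"}, {"type": "text", "text": "b"}]})
    # -> "a\nb" (text parts of structured content are joined)
    # Anything unrecognised still falls back to str(message).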
@@ -668,6 +632,108 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
         }
         self.logger.debug("Chat finished", data=data)

+    def _convert_prompt_messages(
+        self, prompt_messages: List[PromptMessage]
+    ) -> List[MessageParamT]:
+        """
+        Convert prompt messages to this LLM's specific message format.
+        To be implemented by concrete LLM classes.
+        """
+        raise NotImplementedError("Must be implemented by subclass")
+
+    async def show_prompt_loaded(
+        self,
+        prompt_name: str,
+        description: Optional[str] = None,
+        message_count: int = 0,
+        arguments: Optional[dict[str, str]] = None,
+    ):
+        """
+        Display information about a loaded prompt template.
+
+        Args:
+            prompt_name: The name of the prompt
+            description: Optional description of the prompt
+            message_count: Number of messages in the prompt
+            arguments: Optional dictionary of arguments passed to the prompt
+        """
+        await self.display.show_prompt_loaded(
+            prompt_name=prompt_name,
+            description=description,
+            message_count=message_count,
+            agent_name=self.name,
+            aggregator=self.aggregator,
+            arguments=arguments,
+        )
+
+    async def apply_prompt_template(
+        self, prompt_result: GetPromptResult, prompt_name: str
+    ) -> str:
+        """
+        Apply a prompt template by adding it to the conversation history.
+        If the last message in the prompt is from a user, automatically
+        generate an assistant response.
+
+        Args:
+            prompt_result: The GetPromptResult containing prompt messages
+            prompt_name: The name of the prompt being applied
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
+        prompt_messages: List[PromptMessage] = prompt_result.messages
+
+        # Check if we have any messages
+        if not prompt_messages:
+            return "Prompt contains no messages"
+
+        # Extract arguments if they were stored in the result
+        arguments = getattr(prompt_result, "arguments", None)
+
+        # Display information about the loaded prompt
+        await self.show_prompt_loaded(
+            prompt_name=prompt_name,
+            description=prompt_result.description,
+            message_count=len(prompt_messages),
+            arguments=arguments,
+        )
+
+        # Check the last message role
+        last_message = prompt_messages[-1]
+
+        if last_message.role == "user":
+            # For user messages: Add all previous messages to history, then generate response to the last one
+            self.logger.debug("Last message in prompt is from user, generating assistant response")
+
+            # Add all but the last message to history
+            if len(prompt_messages) > 1:
+                previous_messages = prompt_messages[:-1]
+                converted = []
+                for msg in previous_messages:
+                    converted.append(self.type_converter.from_mcp_prompt_message(msg))
+                self.history.extend(converted, is_prompt=True)
+
+            # Extract the user's question and generate a response
+            user_content = last_message.content
+            user_text = user_content.text if hasattr(user_content, "text") else str(user_content)
+
+            return await self.generate_str(user_text)
+        else:
+            # For assistant messages: Add all messages to history and return the last one
+            self.logger.debug("Last message in prompt is from assistant, returning it directly")
+
+            # Convert and add all messages to history
+            converted = []
+            for msg in prompt_messages:
+                converted.append(self.type_converter.from_mcp_prompt_message(msg))
+            self.history.extend(converted, is_prompt=True)
+
+            # Return the assistant's message
+            content = last_message.content
+            return content.text if hasattr(content, "text") else str(content)
+
+

 class PassthroughLLM(AugmentedLLM):
     """
mcp_agent/workflows/llm/augmented_llm_anthropic.py
@@ -39,8 +39,10 @@ from mcp_agent.workflows.llm.augmented_llm import (
 )
 from mcp_agent.core.exceptions import ProviderKeyError
 from mcp_agent.logging.logger import get_logger
+from mcp.types import PromptMessage
 from rich.text import Text

+_logger = get_logger(__name__)
 DEFAULT_ANTHROPIC_MODEL = "claude-3-7-sonnet-latest"


@@ -94,8 +96,9 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                 "Please check that your API key is valid and not expired.",
             ) from e

-        if params.use_history:
-            messages.extend(self.history.get())
+        # Always include prompt messages, but only include conversation history
+        # if use_history is True
+        messages.extend(self.history.get(include_history=params.use_history))

         if isinstance(message, str):
             messages.append({"role": "user", "content": message})
@@ -287,8 +290,17 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                     )
                 )

+        # Only save the new conversation messages to history if use_history is true
+        # Keep the prompt messages separate
         if params.use_history:
-            self.history.set(messages)
+            # Get current prompt messages
+            prompt_messages = self.history.get(include_history=False)
+
+            # Calculate new conversation messages (excluding prompts)
+            new_messages = messages[len(prompt_messages):]
+
+            # Update conversation history
+            self.history.set(new_messages)

         self._log_chat_finished(model=model)

@@ -525,6 +537,40 @@ class AnthropicMCPTypeConverter(ProviderToMCPConverter[MessageParam, Message]):
            **typed_dict_extras(param, ["role", "content"]),
        )

+    @classmethod
+    def from_mcp_prompt_message(cls, message: PromptMessage) -> MessageParam:
+        """Convert an MCP PromptMessage to an Anthropic MessageParam."""
+
+        # Extract content text
+        content_text = (
+            message.content.text
+            if hasattr(message.content, "text")
+            else str(message.content)
+        )
+
+        # Extract extras for flexibility
+        extras = message.model_dump(exclude={"role", "content"})
+
+        # Handle based on role
+        if message.role == "user":
+            return {"role": "user", "content": content_text, **extras}
+        elif message.role == "assistant":
+            return {
+                "role": "assistant",
+                "content": [{"type": "text", "text": content_text}],
+                **extras,
+            }
+        else:
+            # Fall back to user for any unrecognized role, including "system"
+            _logger.warning(
+                f"Unsupported role '{message.role}' in PromptMessage. Falling back to 'user' role."
+            )
+            return {
+                "role": "user",
+                "content": f"[{message.role.upper()}] {content_text}",
+                **extras,
+            }
+

 def mcp_content_to_anthropic_content(
     content: TextContent | ImageContent | EmbeddedResource,
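
To make the conversion concrete, a small sketch of what from_mcp_prompt_message produces (values are illustrative; with no extra fields on the message, extras is empty):

    from mcp.types import PromptMessage, TextContent

    msg = PromptMessage(role="user", content=TextContent(type="text", text="Summarise the report"))
    AnthropicMCPTypeConverter.from_mcp_prompt_message(msg)
    # -> {"role": "user", "content": "Summarise the report"}
    # An "assistant" message instead yields a text-block list:
    # {"role": "assistant", "content": [{"type": "text", "text": "Summarise the report"}]}
    # Any other role is logged and coerced to a "[ROLE] ..." user message.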