fast-agent-mcp 0.2.42__py3-none-any.whl → 0.2.44__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (31)
  1. {fast_agent_mcp-0.2.42.dist-info → fast_agent_mcp-0.2.44.dist-info}/METADATA +3 -2
  2. {fast_agent_mcp-0.2.42.dist-info → fast_agent_mcp-0.2.44.dist-info}/RECORD +31 -30
  3. mcp_agent/agents/base_agent.py +60 -22
  4. mcp_agent/config.py +2 -0
  5. mcp_agent/core/agent_app.py +15 -5
  6. mcp_agent/core/enhanced_prompt.py +87 -13
  7. mcp_agent/core/fastagent.py +9 -1
  8. mcp_agent/core/interactive_prompt.py +60 -1
  9. mcp_agent/core/usage_display.py +10 -3
  10. mcp_agent/llm/augmented_llm.py +4 -5
  11. mcp_agent/llm/augmented_llm_passthrough.py +15 -0
  12. mcp_agent/llm/providers/augmented_llm_anthropic.py +4 -3
  13. mcp_agent/llm/providers/augmented_llm_bedrock.py +3 -3
  14. mcp_agent/llm/providers/augmented_llm_google_native.py +4 -7
  15. mcp_agent/llm/providers/augmented_llm_openai.py +5 -8
  16. mcp_agent/llm/providers/augmented_llm_tensorzero.py +6 -7
  17. mcp_agent/llm/providers/google_converter.py +6 -9
  18. mcp_agent/llm/providers/multipart_converter_anthropic.py +5 -4
  19. mcp_agent/llm/providers/multipart_converter_openai.py +33 -0
  20. mcp_agent/llm/providers/multipart_converter_tensorzero.py +3 -2
  21. mcp_agent/logging/rich_progress.py +6 -2
  22. mcp_agent/logging/transport.py +30 -36
  23. mcp_agent/mcp/helpers/content_helpers.py +26 -11
  24. mcp_agent/mcp/interfaces.py +22 -2
  25. mcp_agent/mcp/mcp_aggregator.py +22 -3
  26. mcp_agent/mcp/prompt_message_multipart.py +2 -3
  27. mcp_agent/ui/console_display.py +353 -142
  28. mcp_agent/ui/console_display_legacy.py +401 -0
  29. {fast_agent_mcp-0.2.42.dist-info → fast_agent_mcp-0.2.44.dist-info}/WHEEL +0 -0
  30. {fast_agent_mcp-0.2.42.dist-info → fast_agent_mcp-0.2.44.dist-info}/entry_points.txt +0 -0
  31. {fast_agent_mcp-0.2.42.dist-info → fast_agent_mcp-0.2.44.dist-info}/licenses/LICENSE +0 -0
@@ -30,7 +30,7 @@ from mcp_agent.core.enhanced_prompt import (
     handle_special_commands,
 )
 from mcp_agent.core.usage_display import collect_agents_from_provider, display_usage_report
-from mcp_agent.mcp.mcp_aggregator import SEP  # Import SEP once at the top
+from mcp_agent.mcp.mcp_aggregator import SEP
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.progress_display import progress_display
 
@@ -56,6 +56,7 @@ class PromptProvider(Protocol):
         prompt_title: Optional[str] = None,
         arguments: Optional[Dict[str, str]] = None,
         agent_name: Optional[str] = None,
+        as_template: bool = False,
         **kwargs,
     ) -> str:
         """Apply a prompt."""
@@ -188,6 +189,10 @@ class InteractivePrompt:
                     # Handle usage display
                     await self._show_usage(prompt_provider, agent)
                     continue
+                elif "show_markdown" in command_result:
+                    # Handle markdown display
+                    await self._show_markdown(prompt_provider, agent)
+                    continue
 
                 # Skip further processing if:
                 # 1. The command was handled (command_result is truthy)
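
Note: the dispatch above only tests key membership, so a command handler signals this path by returning a mapping that contains `show_markdown`. A hedged sketch of that contract; the trigger strings and the `show_usage` key are inferred from this hunk and the neighbouring branch, not confirmed:

```python
def handle_command(command: str) -> dict | bool:
    # Hypothetical handler: key names mirror the membership checks above.
    if command == "/markdown":
        return {"show_markdown": True}  # fires the new _show_markdown branch
    if command == "/usage":
        return {"show_usage": True}     # fires the existing _show_usage branch
    return False                        # not a special command
```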
@@ -713,3 +718,57 @@ class InteractivePrompt:
 
         except Exception as e:
             rich_print(f"[red]Error showing usage: {e}[/red]")
+
+    async def _show_markdown(self, prompt_provider: PromptProvider, agent_name: str) -> None:
+        """
+        Show the last assistant message without markdown formatting.
+
+        Args:
+            prompt_provider: Provider that has access to agents
+            agent_name: Name of the current agent
+        """
+        try:
+            # Get agent to display from
+            if hasattr(prompt_provider, "_agent"):
+                # This is an AgentApp - get the specific agent
+                agent = prompt_provider._agent(agent_name)
+            else:
+                # This is a single agent
+                agent = prompt_provider
+
+            # Check if agent has message history
+            if not hasattr(agent, "_llm") or not agent._llm:
+                rich_print("[yellow]No message history available[/yellow]")
+                return
+
+            message_history = agent._llm.message_history
+            if not message_history:
+                rich_print("[yellow]No messages in history[/yellow]")
+                return
+
+            # Find the last assistant message
+            last_assistant_msg = None
+            for msg in reversed(message_history):
+                if msg.role == "assistant":
+                    last_assistant_msg = msg
+                    break
+
+            if not last_assistant_msg:
+                rich_print("[yellow]No assistant messages found[/yellow]")
+                return
+
+            # Get the text content and display without markdown
+            content = last_assistant_msg.last_text()
+
+            # Display with a simple header
+            rich_print("\n[bold blue]Last Assistant Response (Plain Text):[/bold blue]")
+            rich_print("─" * 60)
+            # Use console.print with markup=False to display raw text
+            from mcp_agent import console
+
+            console.console.print(content, markup=False)
+            rich_print("─" * 60)
+            rich_print()
+
+        except Exception as e:
+            rich_print(f"[red]Error showing markdown: {e}[/red]")
@@ -91,12 +91,19 @@ def display_usage_report(
     max_agent_width = min(15, max(len(data["name"]) for data in usage_data) if usage_data else 8)
     agent_width = max(max_agent_width, 5)  # Minimum of 5 for "Agent" header
 
-    # Display the table
+    # Display the table with new visual style
     console = Console()
+
+    # Top separator
+    console.print()
+    console.print("─" * console.size.width, style="dim")
+    console.print()
+
+    # Header with block character
+    console.print("[dim]▎[/dim] [bold dim]Usage Summary[/bold dim]")
     console.print()
-    console.print("[dim]Usage Summary (Cumulative)[/dim]")
 
-    # Print header with proper spacing
+    # Table header with proper spacing
     console.print(
         f"[dim]{'Agent':<{agent_width}} {'Input':>9} {'Output':>9} {'Total':>9} {'Turns':>6} {'Tools':>6} {'Context%':>9} {'Model':<25}[/dim]"
     )
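
Note: the new style is plain Rich calls, so it can be previewed standalone; this snippet just replays the exact strings added above (requires the `rich` package):

```python
from rich.console import Console

console = Console()
console.print()
console.print("─" * console.size.width, style="dim")  # full-width top separator
console.print()
console.print("[dim]▎[/dim] [bold dim]Usage Summary[/bold dim]")  # block-glyph header
console.print()
```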
@@ -46,7 +46,6 @@ from mcp_agent.mcp.interfaces import (
 from mcp_agent.mcp.mcp_aggregator import MCPAggregator
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.mcp.prompt_render import render_multipart_message
-from mcp_agent.ui.console_display import ConsoleDisplay
 
 # Define type variables locally
 MessageParamT = TypeVar("MessageParamT")
@@ -157,6 +156,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         self._message_history: List[PromptMessageMultipart] = []
 
         # Initialize the display component
+        if self.context.config and self.context.config.logger.use_legacy_display:
+            from mcp_agent.ui.console_display_legacy import ConsoleDisplay
+        else:
+            from mcp_agent.ui.console_display import ConsoleDisplay
         self.display = ConsoleDisplay(config=self.context.config)
 
         # Tool call counter for current turn
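
Note: this branch is driven by a `use_legacy_display` flag that the accompanying `config.py` change (+2 lines) presumably introduces under the logger settings. A hedged sketch of opting in; the `Settings` class name and field path are assumptions about `mcp_agent/config.py`:

```python
from mcp_agent.config import Settings  # class name assumed

settings = Settings()
settings.logger.use_legacy_display = True  # routes AugmentedLLM to console_display_legacy

# Presumed YAML equivalent in fastagent.config.yaml:
# logger:
#   use_legacy_display: true
```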
@@ -448,10 +451,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         """Display a tool result in a formatted panel."""
         self.display.show_tool_result(result, name=self.name)
 
-    def show_oai_tool_result(self, result: str) -> None:
-        """Display a tool result in a formatted panel."""
-        self.display.show_oai_tool_result(result, name=self.name)
-
     def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
         """Display a tool call in a formatted panel."""
         self._current_turn_tool_calls += 1
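
Note: with `show_oai_tool_result` gone, every provider funnels through the remaining `show_tool_result`, passing the structured `CallToolResult` rather than a pre-stringified form (see the OpenAI, Google, and TensorZero hunks below). A small sketch of the value now handed to the display layer:

```python
from mcp.types import CallToolResult, TextContent

# Providers now pass the structured result, not str(result), so the display
# component can format each content block itself.
result = CallToolResult(content=[TextContent(type="text", text="42")])
# self.show_tool_result(result)  # -> self.display.show_tool_result(result, name=self.name)
```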
@@ -162,8 +162,17 @@ class PassthroughLLM(AugmentedLLM):
         self,
         multipart_messages: List["PromptMessageMultipart"],
         request_params: RequestParams | None = None,
+        is_template: bool = False,
     ) -> PromptMessageMultipart:
+        print(
+            f"DEBUG: PassthroughLLM _apply_prompt_provider_specific called with {len(multipart_messages)} messages, is_template={is_template}"
+        )
+
+        # Add messages to history with proper is_prompt flag
+        self.history.extend(multipart_messages, is_prompt=is_template)
+
         last_message = multipart_messages[-1]
+        print(f"DEBUG: Last message role: {last_message.role}, text: '{last_message.first_text()}'")
 
         if self.is_tool_call(last_message):
             result = Prompt.assistant(await self.generate_str(last_message.first_text()))
@@ -200,8 +209,14 @@
         else:
             # TODO -- improve when we support Audio/Multimodal gen models e.g. gemini . This should really just return the input as "assistant"...
             concatenated: str = "\n".join(message.all_text() for message in multipart_messages)
+            print(
+                f"DEBUG: PassthroughLLM generating response: '{concatenated}' (is_template={is_template})"
+            )
             await self.show_assistant_message(concatenated)
             result = Prompt.assistant(concatenated)
+        print(f"DEBUG: PassthroughLLM created result: {result}")
+        print(f"DEBUG: Result first_text(): {result.first_text()}")
+        print(f"DEBUG: Result content: {result.content}")
 
         # Track usage for this passthrough "turn"
         try:
@@ -1,6 +1,6 @@
 from typing import TYPE_CHECKING, List, Tuple, Type
 
-from mcp.types import EmbeddedResource, ImageContent, TextContent
+from mcp.types import TextContent
 
 from mcp_agent.core.prompt import Prompt
 from mcp_agent.event_progress import ProgressAction
@@ -33,6 +33,7 @@ from anthropic.types import (
 from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
+    ContentBlock,
 )
 from rich.text import Text
 
@@ -149,7 +150,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         self,
         message_param,
         request_params: RequestParams | None = None,
-    ) -> list[TextContent | ImageContent | EmbeddedResource]:
+    ) -> list[ContentBlock]:
         """
         Process a query using an LLM and available tools.
         Override this method to use a different LLM.
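
Note: this is the first of many identical substitutions across providers: the spelled-out `TextContent | ImageContent | EmbeddedResource` unions collapse to `ContentBlock`, which in recent `mcp` releases is, to the best of my knowledge, a union over all message content variants, including newer ones such as `ResourceLink`. An illustrative consumer of the narrowed annotation:

```python
from mcp.types import ContentBlock, TextContent


def collect_text(responses: list[ContentBlock]) -> str:
    """Join the text blocks of a provider response, skipping other variants."""
    return "\n".join(b.text for b in responses if isinstance(b, TextContent))
```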
@@ -190,7 +191,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             for tool in tool_list.tools
         ]
 
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
 
         model = self.default_request_params.model
 
@@ -4,7 +4,7 @@ import re
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, Union
 
-from mcp.types import EmbeddedResource, ImageContent, TextContent
+from mcp.types import ContentBlock, TextContent
 from rich.text import Text
 
 from mcp_agent.core.exceptions import ProviderKeyError
@@ -1066,7 +1066,7 @@ class BedrockAugmentedLLM(AugmentedLLM[BedrockMessageParam, BedrockMessage]):
         self,
         message_param: BedrockMessageParam,
         request_params: RequestParams | None = None,
-    ) -> List[TextContent | ImageContent | EmbeddedResource]:
+    ) -> List[ContentBlock | CallToolRequestParams]:
         """
         Process a query using Bedrock and available tools.
         """
@@ -1120,7 +1120,7 @@ class BedrockAugmentedLLM(AugmentedLLM[BedrockMessageParam, BedrockMessage]):
                 f"Model {model_to_check} does not support tool use - skipping tool preparation"
             )
 
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
         model = self.default_request_params.model
 
         for i in range(params.max_iterations):
@@ -10,8 +10,7 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
+    ContentBlock,
     TextContent,
 )
 from rich.text import Text
@@ -228,12 +227,12 @@ class GoogleNativeAugmentedLLM(AugmentedLLM[types.Content, types.Content]):
     async def _google_completion(
         self,
         request_params: RequestParams | None = None,
-    ) -> List[TextContent | ImageContent | EmbeddedResource]:
+    ) -> List[ContentBlock]:
         """
         Process a query using Google's generate_content API and available tools.
         """
         request_params = self.get_request_params(request_params=request_params)
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
 
         # Load full conversation history if use_history is true
         if request_params.use_history:
@@ -376,9 +375,7 @@ class GoogleNativeAugmentedLLM(AugmentedLLM[types.Content, types.Content]):
 
                 # Execute the tool call. google.genai does not provide a tool_call_id, pass None.
                 result = await self.call_tool(tool_call_request, None)
-                self.show_oai_tool_result(
-                    str(result.content)
-                )  # Use show_oai_tool_result for consistency
+                self.show_tool_result(result)
 
                 tool_results.append((tool_call_params.name, result))  # Store name and result
 
@@ -4,8 +4,7 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
+    ContentBlock,
     TextContent,
 )
 from openai import AsyncOpenAI, AuthenticationError
@@ -298,7 +297,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         self,
         message: OpenAIMessage,
         request_params: RequestParams | None = None,
-    ) -> List[TextContent | ImageContent | EmbeddedResource]:
+    ) -> List[ContentBlock]:
         """
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
@@ -307,7 +306,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         request_params = self.get_request_params(request_params=request_params)
 
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
 
         # TODO -- move this in to agent context management / agent group handling
         messages: List[ChatCompletionMessageParam] = []
@@ -427,7 +426,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                     ),
                 )
                 result = await self.call_tool(tool_call_request, tool_call.id)
-                self.show_oai_tool_result(str(result))
+                self.show_tool_result(result)
 
                 tool_results.append((tool_call.id, result))
                 responses.extend(result.content)
@@ -511,9 +510,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         # For assistant messages: Return the last message (no completion needed)
         message_param: OpenAIMessage = OpenAIConverter.convert_to_openai(last_message)
-        responses: List[
-            TextContent | ImageContent | EmbeddedResource
-        ] = await self._openai_completion(
+        responses: List[ContentBlock] = await self._openai_completion(
             message_param,
             request_params,
         )
@@ -5,8 +5,7 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
+    ContentBlock,
     TextContent,
 )
 from tensorzero import AsyncTensorZeroGateway
@@ -169,7 +168,7 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
         available_tools: Optional[List[Dict[str, Any]]] = await self._prepare_t0_tools()
 
         # [3] Initialize storage arrays for the text content of the assistant message reply and, optionally, tool calls and results, and begin inference loop
-        final_assistant_message: List[Union[TextContent, ImageContent, EmbeddedResource]] = []
+        final_assistant_message: List[ContentBlock] = []
         last_executed_results: Optional[List[CallToolResult]] = None
 
         for i in range(merged_params.max_iterations):
@@ -353,11 +352,11 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
         completion: Union[ChatInferenceResponse, JsonInferenceResponse],
         available_tools_for_display: Optional[List[Dict[str, Any]]] = None,
     ) -> Tuple[
-        List[Union[TextContent, ImageContent, EmbeddedResource]],  # Text/Image content ONLY
+        List[Union[ContentBlock]],  # Text/Image content ONLY
        List[CallToolResult],  # Executed results
        List[Any],  # Raw tool_call blocks
     ]:
-        content_parts_this_turn: List[Union[TextContent, ImageContent, EmbeddedResource]] = []
+        content_parts_this_turn: List[ContentBlock] = []
         executed_tool_results: List[CallToolResult] = []
         raw_tool_call_blocks_from_t0: List[Any] = []
 
@@ -402,7 +401,7 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
                         setattr(result, "_t0_tool_name_temp", tool_name)
                         setattr(result, "_t0_is_error_temp", False)
                         executed_tool_results.append(result)
-                        self.show_oai_tool_result(str(result))
+                        self.show_tool_result(result)
                     except Exception as e:
                         self.logger.error(
                             f"Error executing tool {tool_name} (id: {tool_use_id}): {e}"
@@ -415,7 +414,7 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
                         setattr(error_result, "_t0_tool_name_temp", tool_name)
                         setattr(error_result, "_t0_is_error_temp", True)
                         executed_tool_results.append(error_result)
-                        self.show_oai_tool_result(f"ERROR: {error_text}")
+                        self.show_tool_result(error_result)
 
                 elif block_type == "thought":
                     thought_text = getattr(block, "text", None)
@@ -8,6 +8,7 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
+    ContentBlock,
     EmbeddedResource,
     ImageContent,
     TextContent,
@@ -158,17 +159,15 @@ class GoogleConverter:
 
     def convert_from_google_content(
         self, content: types.Content
-    ) -> List[TextContent | ImageContent | EmbeddedResource | CallToolRequestParams]:
+    ) -> List[ContentBlock | CallToolRequestParams]:
         """
         Converts google.genai types.Content from a model response to a list of
         fast-agent content types or tool call requests.
         """
-        fast_agent_parts: List[
-            TextContent | ImageContent | EmbeddedResource | CallToolRequestParams
-        ] = []
+        fast_agent_parts: List[ContentBlock | CallToolRequestParams] = []
 
-        if content is None or not hasattr(content, 'parts') or content.parts is None:
-            return []  # Google API response 'content' object is None. Cannot extract parts.
+        if content is None or not hasattr(content, "parts") or content.parts is None:
+            return []  # Google API response 'content' object is None. Cannot extract parts.
 
         for part in content.parts:
             if part.text:
@@ -340,9 +339,7 @@ class GoogleConverter:
         if content.role == "model" and any(part.function_call for part in content.parts):
             return PromptMessageMultipart(role="assistant", content=[])
 
-        fast_agent_parts: List[
-            TextContent | ImageContent | EmbeddedResource | CallToolRequestParams
-        ] = []
+        fast_agent_parts: List[ContentBlock | CallToolRequestParams] = []
         for part in content.parts:
             if part.text:
                 fast_agent_parts.append(TextContent(type="text", text=part.text))
@@ -16,6 +16,7 @@ from anthropic.types import (
 from mcp.types import (
     BlobResourceContents,
     CallToolResult,
+    ContentBlock,
     EmbeddedResource,
     ImageContent,
     PromptMessage,
@@ -117,7 +118,7 @@ class AnthropicConverter:
 
     @staticmethod
     def _convert_content_items(
-        content_items: Sequence[Union[TextContent, ImageContent, EmbeddedResource]],
+        content_items: Sequence[ContentBlock],
         document_mode: bool = True,
     ) -> List[ContentBlockParam]:
         """
@@ -210,7 +211,7 @@ class AnthropicConverter:
                 return ImageBlockParam(
                     type="image", source=URLImageSourceParam(type="url", url=uri_str)
                 )
-
+
             # Try to get image data
             image_data = get_image_data(resource)
             if image_data:
@@ -220,7 +221,7 @@ class AnthropicConverter:
                         type="base64", media_type=mime_type, data=image_data
                     ),
                 )
-
+
             return AnthropicConverter._create_fallback_text("Image missing data", resource)
 
         elif mime_type == "application/pdf":
@@ -450,4 +451,4 @@ class AnthropicConverter:
         # Add separate blocks directly to the message
         content_blocks.extend(separate_blocks)
 
-        return MessageParam(role="user", content=content_blocks)
+        return MessageParam(role="user", content=content_blocks)
@@ -5,6 +5,7 @@ from mcp.types import (
     EmbeddedResource,
     ImageContent,
     PromptMessage,
+    ResourceLink,
     TextContent,
 )
 from openai.types.chat import ChatCompletionMessageParam
@@ -16,6 +17,7 @@ from mcp_agent.mcp.helpers.content_helpers import (
     get_text,
     is_image_content,
     is_resource_content,
+    is_resource_link,
     is_text_content,
 )
 from mcp_agent.mcp.mime_utils import (
@@ -92,6 +94,11 @@ class OpenAIConverter:
                 if block:
                     content_blocks.append(block)
 
+            elif is_resource_link(item):
+                block = OpenAIConverter._convert_resource_link(item)
+                if block:
+                    content_blocks.append(block)
+
             else:
                 _logger.warning(f"Unsupported content type: {type(item)}")
                 # Create a text block with information about the skipped content
@@ -213,6 +220,32 @@
 
         return "text/plain"
 
+    @staticmethod
+    def _convert_resource_link(
+        resource: ResourceLink,
+    ) -> Optional[ContentBlock]:
+        """
+        Convert ResourceLink to OpenAI content block.
+
+        Args:
+            resource: The resource link to convert
+
+        Returns:
+            An OpenAI content block or None if conversion failed
+        """
+        name = resource.name or "unknown"
+        uri_str = str(resource.uri)
+        mime_type = resource.mimeType or "unknown"
+        description = resource.description or "No description"
+
+        # Create a text block with the resource link information
+        return {
+            "type": "text",
+            "text": f"Linked Resource ${name} MIME type {mime_type}>\n"
+            f"Resource Link: {uri_str}\n"
+            f"${description}\n",
+        }
+
     @staticmethod
     def _convert_embedded_resource(
         resource: EmbeddedResource,
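
Note: a hedged usage sketch of the new converter path; the field values are made up, and the emitted text reproduces the released template verbatim, including its `${...}` and stray `>` literals:

```python
from mcp.types import ResourceLink

link = ResourceLink(
    type="resource_link",
    name="report.pdf",
    uri="file:///tmp/report.pdf",
    mimeType="application/pdf",
    description="Q3 report",
)

# OpenAIConverter._convert_resource_link(link) returns a plain text block:
# {"type": "text",
#  "text": "Linked Resource $report.pdf MIME type application/pdf>\n"
#          "Resource Link: file:///tmp/report.pdf\n"
#          "$Q3 report\n"}
```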
@@ -1,8 +1,9 @@
 import json
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional
 
 from mcp.types import (
     CallToolResult,
+    ContentBlock,
     EmbeddedResource,
     ImageContent,
     TextContent,
@@ -22,7 +23,7 @@ class TensorZeroConverter:
 
     @staticmethod
     def _convert_content_part(
-        part: Union[TextContent, ImageContent, EmbeddedResource],
+        part: ContentBlock,
     ) -> Optional[Dict[str, Any]]:
         """Converts a single MCP content part to a T0 content block dictionary."""
         if isinstance(part, TextContent):
@@ -37,8 +37,12 @@ class RichProgressDisplay:
         self._progress.start()
 
     def stop(self) -> None:
-        """stop"""
+        """Stop and clear the progress display."""
+        # Hide all tasks before stopping (like pause does)
+        for task in self._progress.tasks:
+            task.visible = False
         self._progress.stop()
+        self._paused = True
 
     def pause(self) -> None:
         """Pause the progress display."""
@@ -107,7 +111,7 @@ class RichProgressDisplay:
             description = f"[{self._get_action_style(event.action)}]{formatted_tokens}"
         else:
             description = f"[{self._get_action_style(event.action)}]{event.action.value:<15}"
-
+
         self._progress.update(
             task_id,
             description=description,