fast-agent-mcp 0.2.43__py3-none-any.whl → 0.2.45__py3-none-any.whl

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those versions.

Note: this version of fast-agent-mcp has been flagged as potentially problematic.

Files changed (37)
  1. {fast_agent_mcp-0.2.43.dist-info → fast_agent_mcp-0.2.45.dist-info}/METADATA +6 -5
  2. {fast_agent_mcp-0.2.43.dist-info → fast_agent_mcp-0.2.45.dist-info}/RECORD +37 -36
  3. mcp_agent/agents/base_agent.py +60 -22
  4. mcp_agent/agents/workflow/evaluator_optimizer.py +39 -63
  5. mcp_agent/agents/workflow/router_agent.py +46 -21
  6. mcp_agent/config.py +2 -0
  7. mcp_agent/context.py +4 -0
  8. mcp_agent/core/agent_app.py +15 -5
  9. mcp_agent/core/direct_decorators.py +4 -5
  10. mcp_agent/core/enhanced_prompt.py +80 -11
  11. mcp_agent/core/fastagent.py +9 -1
  12. mcp_agent/core/interactive_prompt.py +60 -1
  13. mcp_agent/core/usage_display.py +10 -3
  14. mcp_agent/human_input/elicitation_form.py +16 -13
  15. mcp_agent/llm/augmented_llm.py +5 -7
  16. mcp_agent/llm/augmented_llm_passthrough.py +4 -0
  17. mcp_agent/llm/providers/augmented_llm_anthropic.py +258 -98
  18. mcp_agent/llm/providers/augmented_llm_bedrock.py +3 -3
  19. mcp_agent/llm/providers/augmented_llm_google_native.py +4 -7
  20. mcp_agent/llm/providers/augmented_llm_openai.py +5 -8
  21. mcp_agent/llm/providers/augmented_llm_tensorzero.py +6 -7
  22. mcp_agent/llm/providers/google_converter.py +6 -9
  23. mcp_agent/llm/providers/multipart_converter_anthropic.py +5 -4
  24. mcp_agent/llm/providers/multipart_converter_openai.py +33 -0
  25. mcp_agent/llm/providers/multipart_converter_tensorzero.py +3 -2
  26. mcp_agent/logging/rich_progress.py +6 -2
  27. mcp_agent/logging/transport.py +30 -36
  28. mcp_agent/mcp/helpers/content_helpers.py +26 -11
  29. mcp_agent/mcp/interfaces.py +22 -2
  30. mcp_agent/mcp/prompt_message_multipart.py +2 -3
  31. mcp_agent/resources/examples/workflows/evaluator.py +2 -2
  32. mcp_agent/resources/examples/workflows/router.py +1 -1
  33. mcp_agent/ui/console_display.py +363 -142
  34. mcp_agent/ui/console_display_legacy.py +401 -0
  35. {fast_agent_mcp-0.2.43.dist-info → fast_agent_mcp-0.2.45.dist-info}/WHEEL +0 -0
  36. {fast_agent_mcp-0.2.43.dist-info → fast_agent_mcp-0.2.45.dist-info}/entry_points.txt +0 -0
  37. {fast_agent_mcp-0.2.43.dist-info → fast_agent_mcp-0.2.45.dist-info}/licenses/LICENSE +0 -0
--- a/mcp_agent/llm/providers/augmented_llm_anthropic.py
+++ b/mcp_agent/llm/providers/augmented_llm_anthropic.py
@@ -1,6 +1,7 @@
-from typing import TYPE_CHECKING, List, Tuple, Type
+import json
+from typing import TYPE_CHECKING, Any, List, Tuple, Type
 
-from mcp.types import EmbeddedResource, ImageContent, TextContent
+from mcp.types import TextContent
 
 from mcp_agent.core.prompt import Prompt
 from mcp_agent.event_progress import ProgressAction
@@ -33,6 +34,8 @@ from anthropic.types import (
 from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
+    CallToolResult,
+    ContentBlock,
 )
 from rich.text import Text
 
@@ -98,6 +101,184 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             cache_mode = self.context.config.anthropic.cache_mode
         return cache_mode
 
+    async def _prepare_tools(self, structured_model: Type[ModelT] | None = None) -> List[ToolParam]:
+        """Prepare tools based on whether we're in structured output mode."""
+        if structured_model:
+            # JSON mode - create a single tool for structured output
+            return [
+                ToolParam(
+                    name="return_structured_output",
+                    description="Return the response in the required JSON format",
+                    input_schema=structured_model.model_json_schema(),
+                )
+            ]
+        else:
+            # Regular mode - use tools from aggregator
+            tool_list: ListToolsResult = await self.aggregator.list_tools()
+            return [
+                ToolParam(
+                    name=tool.name,
+                    description=tool.description or "",
+                    input_schema=tool.inputSchema,
+                )
+                for tool in tool_list.tools
+            ]
+
+    def _apply_system_cache(self, base_args: dict, cache_mode: str) -> None:
+        """Apply cache control to system prompt if cache mode allows it."""
+        if cache_mode != "off" and base_args["system"]:
+            if isinstance(base_args["system"], str):
+                base_args["system"] = [
+                    {
+                        "type": "text",
+                        "text": base_args["system"],
+                        "cache_control": {"type": "ephemeral"},
+                    }
+                ]
+                self.logger.debug(
+                    "Applied cache_control to system prompt (caches tools+system in one block)"
+                )
+            else:
+                self.logger.debug(f"System prompt is not a string: {type(base_args['system'])}")
+
+    async def _apply_conversation_cache(self, messages: List[MessageParam], cache_mode: str) -> int:
+        """Apply conversation caching if in auto mode. Returns number of cache blocks applied."""
+        applied_count = 0
+        if cache_mode == "auto" and self.history.should_apply_conversation_cache():
+            cache_updates = self.history.get_conversation_cache_updates()
+
+            # Remove cache control from old positions
+            if cache_updates["remove"]:
+                self.history.remove_cache_control_from_messages(messages, cache_updates["remove"])
+                self.logger.debug(
+                    f"Removed conversation cache_control from positions {cache_updates['remove']}"
+                )
+
+            # Add cache control to new positions
+            if cache_updates["add"]:
+                applied_count = self.history.add_cache_control_to_messages(
+                    messages, cache_updates["add"]
+                )
+                if applied_count > 0:
+                    self.history.apply_conversation_cache_updates(cache_updates)
+                    self.logger.debug(
+                        f"Applied conversation cache_control to positions {cache_updates['add']} ({applied_count} blocks)"
+                    )
+                else:
+                    self.logger.debug(
+                        f"Failed to apply conversation cache_control to positions {cache_updates['add']}"
+                    )
+
+        return applied_count
+
+    async def _process_structured_output(
+        self,
+        content_block: Any,
+    ) -> Tuple[str, CallToolResult, TextContent]:
+        """
+        Process a structured output tool call from Anthropic.
+
+        This handles the special case where Anthropic's model was forced to use
+        a 'return_structured_output' tool via tool_choice. The tool input contains
+        the JSON data we want, so we extract it and format it for display.
+
+        Even though we don't call an external tool, we must create a CallToolResult
+        to satisfy Anthropic's API requirement that every tool_use has a corresponding
+        tool_result in the next message.
+
+        Returns:
+            Tuple of (tool_use_id, tool_result, content_block) for the structured data
+        """
+        tool_args = content_block.input
+        tool_use_id = content_block.id
+
+        # Show the formatted JSON response to the user
+        json_response = json.dumps(tool_args, indent=2)
+        await self.show_assistant_message(json_response)
+
+        # Create the content for responses
+        structured_content = TextContent(type="text", text=json.dumps(tool_args))
+
+        # Create a CallToolResult to satisfy Anthropic's API requirements
+        # This represents the "result" of our structured output "tool"
+        tool_result = CallToolResult(isError=False, content=[structured_content])
+
+        return tool_use_id, tool_result, structured_content
+
+    async def _process_regular_tool_call(
+        self,
+        content_block: Any,
+        available_tools: List[ToolParam],
+        is_first_tool: bool,
+        message_text: str | Text,
+    ) -> Tuple[str, CallToolResult]:
+        """
+        Process a regular MCP tool call.
+
+        This handles actual tool execution via the MCP aggregator.
+        """
+        tool_name = content_block.name
+        tool_args = content_block.input
+        tool_use_id = content_block.id
+
+        if is_first_tool:
+            await self.show_assistant_message(message_text, tool_name)
+
+        self.show_tool_call(available_tools, tool_name, tool_args)
+        tool_call_request = CallToolRequest(
+            method="tools/call",
+            params=CallToolRequestParams(name=tool_name, arguments=tool_args),
+        )
+        result = await self.call_tool(request=tool_call_request, tool_call_id=tool_use_id)
+        self.show_tool_result(result)
+        return tool_use_id, result
+
+    async def _process_tool_calls(
+        self,
+        tool_uses: List[Any],
+        available_tools: List[ToolParam],
+        message_text: str | Text,
+        structured_model: Type[ModelT] | None = None,
+    ) -> Tuple[List[Tuple[str, CallToolResult]], List[ContentBlock]]:
+        """
+        Process tool calls, handling both structured output and regular MCP tools.
+
+        For structured output mode:
+        - Extracts JSON data from the forced 'return_structured_output' tool
+        - Does NOT create fake CallToolResults
+        - Returns the JSON content directly
+
+        For regular tools:
+        - Calls actual MCP tools via the aggregator
+        - Returns real CallToolResults
+        """
+        tool_results = []
+        responses = []
+
+        for tool_idx, content_block in enumerate(tool_uses):
+            tool_name = content_block.name
+            is_first_tool = tool_idx == 0
+
+            if tool_name == "return_structured_output" and structured_model:
+                # Structured output: extract JSON, don't call external tools
+                (
+                    tool_use_id,
+                    tool_result,
+                    structured_content,
+                ) = await self._process_structured_output(content_block)
+                responses.append(structured_content)
+                # Add to tool_results to satisfy Anthropic's API requirement for tool_result messages
+                tool_results.append((tool_use_id, tool_result))
+            else:
+                # Regular tool: call external MCP tool
+                tool_use_id, tool_result = await self._process_regular_tool_call(
+                    content_block, available_tools, is_first_tool, message_text
+                )
+                tool_results.append((tool_use_id, tool_result))
+                responses.extend(tool_result.content)
+
+        return tool_results, responses
+
     async def _process_stream(self, stream: AsyncMessageStream, model: str) -> Message:
         """Process the streaming response and display real-time token usage."""
         # Track estimated output tokens by counting text chunks
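
The _prepare_tools and _process_structured_output helpers above implement structured output by forcing Claude to "call" a synthetic return_structured_output tool whose input_schema is the Pydantic model's JSON schema. A minimal sketch of the same technique written directly against the Anthropic Python SDK; the WeatherReport model, prompt, and model id are illustrative and not taken from this release:

import anthropic
from pydantic import BaseModel


class WeatherReport(BaseModel):
    # Illustrative output model, not part of fast-agent
    city: str
    temperature_c: float
    summary: str


client = anthropic.Anthropic()
response = client.messages.create(
    model="claude-3-5-sonnet-latest",  # any tool-capable Claude model
    max_tokens=512,
    messages=[{"role": "user", "content": "Describe the weather in Oslo as structured data."}],
    tools=[
        {
            "name": "return_structured_output",
            "description": "Return the response in the required JSON format",
            "input_schema": WeatherReport.model_json_schema(),
        }
    ],
    # Forcing tool_choice guarantees the answer arrives as a tool_use block
    tool_choice={"type": "tool", "name": "return_structured_output"},
)
tool_use = next(block for block in response.content if block.type == "tool_use")
report = WeatherReport(**tool_use.input)  # same parse the provider performs on the tool input
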
@@ -149,7 +330,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         self,
         message_param,
         request_params: RequestParams | None = None,
-    ) -> list[TextContent | ImageContent | EmbeddedResource]:
+        structured_model: Type[ModelT] | None = None,
+    ) -> list[ContentBlock]:
         """
         Process a query using an LLM and available tools.
         Override this method to use a different LLM.
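
This return-type change, like the matching ones in the other providers further down, swaps the explicit TextContent | ImageContent | EmbeddedResource union for mcp.types.ContentBlock, the content-block union exported by newer MCP Python SDK releases, so callers can keep dispatching on the concrete block type. A short sketch of consuming such a list; only the standard MCP content classes are assumed:

from mcp.types import ContentBlock, TextContent


def collect_text(blocks: list[ContentBlock]) -> str:
    """Join the text parts of a mixed content-block list, ignoring images and resources."""
    return "\n".join(block.text for block in blocks if isinstance(block, TextContent))
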
@@ -180,17 +362,9 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         cache_mode = self._get_cache_mode()
         self.logger.debug(f"Anthropic cache_mode: {cache_mode}")
 
-        tool_list: ListToolsResult = await self.aggregator.list_tools()
-        available_tools: List[ToolParam] = [
-            ToolParam(
-                name=tool.name,
-                description=tool.description or "",
-                input_schema=tool.inputSchema,
-            )
-            for tool in tool_list.tools
-        ]
+        available_tools = await self._prepare_tools(structured_model)
 
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
 
         model = self.default_request_params.model
 
@@ -208,59 +382,25 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             "tools": available_tools,
         }
 
-        # Apply cache_control to system prompt if cache_mode is not "off"
-        # This caches both tools and system prompt together in one cache block
-        if cache_mode != "off" and base_args["system"]:
-            if isinstance(base_args["system"], str):
-                base_args["system"] = [
-                    {
-                        "type": "text",
-                        "text": base_args["system"],
-                        "cache_control": {"type": "ephemeral"},
-                    }
-                ]
-                self.logger.debug(
-                    "Applied cache_control to system prompt (caches tools+system in one block)"
-                )
-            else:
-                self.logger.debug(f"System prompt is not a string: {type(base_args['system'])}")
+        # Add tool_choice for structured output mode
+        if structured_model:
+            base_args["tool_choice"] = {"type": "tool", "name": "return_structured_output"}
 
-        # Apply conversation caching using walking algorithm if in auto mode
-        if cache_mode == "auto" and self.history.should_apply_conversation_cache():
-            cache_updates = self.history.get_conversation_cache_updates()
+        # Apply cache control to system prompt
+        self._apply_system_cache(base_args, cache_mode)
 
-            # Remove cache control from old positions
-            if cache_updates["remove"]:
-                self.history.remove_cache_control_from_messages(
-                    messages, cache_updates["remove"]
-                )
-                self.logger.debug(
-                    f"Removed conversation cache_control from positions {cache_updates['remove']}"
-                )
+        # Apply conversation caching
+        applied_count = await self._apply_conversation_cache(messages, cache_mode)
 
-            # Add cache control to new positions
-            if cache_updates["add"]:
-                applied_count = self.history.add_cache_control_to_messages(
-                    messages, cache_updates["add"]
+        # Verify we don't exceed Anthropic's 4 cache block limit
+        if applied_count > 0:
+            total_cache_blocks = applied_count
+            if cache_mode != "off" and base_args["system"]:
+                total_cache_blocks += 1  # tools+system cache block
+            if total_cache_blocks > 4:
+                self.logger.warning(
+                    f"Total cache blocks ({total_cache_blocks}) exceeds Anthropic limit of 4"
                 )
-                if applied_count > 0:
-                    self.history.apply_conversation_cache_updates(cache_updates)
-                    self.logger.debug(
-                        f"Applied conversation cache_control to positions {cache_updates['add']} ({applied_count} blocks)"
-                    )
-
-                    # Verify we don't exceed Anthropic's 4 cache block limit
-                    total_cache_blocks = applied_count
-                    if cache_mode != "off" and base_args["system"]:
-                        total_cache_blocks += 1  # tools+system cache block
-                    if total_cache_blocks > 4:
-                        self.logger.warning(
-                            f"Total cache blocks ({total_cache_blocks}) exceeds Anthropic limit of 4"
-                        )
-                else:
-                    self.logger.debug(
-                        f"Failed to apply conversation cache_control to positions {cache_updates['add']}"
-                    )
 
         if params.maxTokens is not None:
             base_args["max_tokens"] = params.maxTokens
@@ -386,34 +526,22 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                         style="dim green italic",
                     )
 
-                # Process all tool calls and collect results
-                tool_results = []
-                # Use a different loop variable for tool enumeration if 'i' is outer loop counter
-                for tool_idx, content_block in enumerate(tool_uses):
-                    tool_name = content_block.name
-                    tool_args = content_block.input
-                    tool_use_id = content_block.id
-
-                    if tool_idx == 0:  # Only show message for first tool use
-                        await self.show_assistant_message(message_text, tool_name)
-
-                    self.show_tool_call(available_tools, tool_name, tool_args)
-                    tool_call_request = CallToolRequest(
-                        method="tools/call",
-                        params=CallToolRequestParams(name=tool_name, arguments=tool_args),
-                    )
-                    # TODO -- support MCP isError etc.
-                    result = await self.call_tool(
-                        request=tool_call_request, tool_call_id=tool_use_id
-                    )
-                    self.show_tool_result(result)
-
-                    # Add each result to our collection
-                    tool_results.append((tool_use_id, result))
-                    responses.extend(result.content)
+                # Process all tool calls using the helper method
+                tool_results, tool_responses = await self._process_tool_calls(
+                    tool_uses, available_tools, message_text, structured_model
+                )
+                responses.extend(tool_responses)
 
+                # Always add tool_results_message first (required by Anthropic API)
                 messages.append(AnthropicConverter.create_tool_results_message(tool_results))
 
+                # For structured output, we have our result and should exit after sending tool_result
+                if structured_model and any(
+                    tool.name == "return_structured_output" for tool in tool_uses
+                ):
+                    self.logger.debug("Structured output received, breaking iteration loop")
+                    break
+
         # Only save the new conversation messages to history if use_history is true
         # Keep the prompt messages separate
         if params.use_history:
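
The "required by Anthropic API" comments refer to the Messages API rule that every tool_use block an assistant turn emits must be answered by a tool_result block with the matching id in the next user message, even when, as in structured-output mode, no external tool actually ran. A hedged sketch of the message pair that create_tool_results_message satisfies; ids and payload are illustrative:

# Assistant turn returned by the model when tool_choice forces return_structured_output:
assistant_turn = {
    "role": "assistant",
    "content": [
        {
            "type": "tool_use",
            "id": "toolu_01ABC",
            "name": "return_structured_output",
            "input": {"city": "Oslo", "temperature_c": 4.0, "summary": "overcast"},
        }
    ],
}

# The next user turn must acknowledge that id; AnthropicConverter.create_tool_results_message
# builds the equivalent of this from the (tool_use_id, CallToolResult) pairs:
user_turn = {
    "role": "user",
    "content": [
        {
            "type": "tool_result",
            "tool_use_id": "toolu_01ABC",
            "content": [
                {"type": "text", "text": '{"city": "Oslo", "temperature_c": 4.0, "summary": "overcast"}'}
            ],
        }
    ],
}
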
@@ -500,19 +628,51 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
     ) -> Tuple[ModelT | None, PromptMessageMultipart]:  # noqa: F821
         request_params = self.get_request_params(request_params)
 
-        # TODO - convert this to use Tool Calling convention for Anthropic Structured outputs
-        multipart_messages[-1].add_text(
-            """YOU MUST RESPOND IN THE FOLLOWING FORMAT:
-            {schema}
-            RESPOND ONLY WITH THE JSON, NO PREAMBLE, CODE FENCES OR 'properties' ARE PERMISSABLE """.format(
-                schema=model.model_json_schema()
-            )
-        )
+        # Check the last message role
+        last_message = multipart_messages[-1]
 
-        result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
-            multipart_messages, request_params
+        # Add all previous messages to history (or all messages if last is from assistant)
+        messages_to_add = (
+            multipart_messages[:-1] if last_message.role == "user" else multipart_messages
         )
-        return self._structured_from_multipart(result, model)
+        converted = []
+
+        for msg in messages_to_add:
+            anthropic_msg = AnthropicConverter.convert_to_anthropic(msg)
+            converted.append(anthropic_msg)
+
+        self.history.extend(converted, is_prompt=False)
+
+        if last_message.role == "user":
+            self.logger.debug("Last message in prompt is from user, generating structured response")
+            message_param = AnthropicConverter.convert_to_anthropic(last_message)
+
+            # Call _anthropic_completion with the structured model
+            response_content = await self._anthropic_completion(
+                message_param, request_params, structured_model=model
+            )
+
+            # Extract the structured data from the response
+            for content in response_content:
+                if content.type == "text":
+                    try:
+                        # Parse the JSON response from the tool
+                        data = json.loads(content.text)
+                        parsed_model = model(**data)
+                        # Create assistant response
+                        assistant_response = Prompt.assistant(content)
+                        return parsed_model, assistant_response
+                    except (json.JSONDecodeError, ValueError) as e:
+                        self.logger.error(f"Failed to parse structured output: {e}")
+                        assistant_response = Prompt.assistant(content)
+                        return None, assistant_response
+
+            # If no valid response found
+            return None, Prompt.assistant()
+        else:
+            # For assistant messages: Return the last message content
+            self.logger.debug("Last message in prompt is from assistant, returning it directly")
+            return None, last_message
 
     def _show_usage(self, raw_usage: Usage, turn_usage: TurnUsage) -> None:
         # Print raw usage for debugging
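
With the rework above, Anthropic structured output no longer patches the prompt with a "RESPOND ONLY WITH THE JSON" instruction; it runs _anthropic_completion with structured_model set and parses the tool input with json.loads plus model(**data). A hedged sketch of exercising the structured path from application code, assuming fast-agent's agent-level structured() call; the agent name, model alias, and output model are illustrative:

import asyncio

from pydantic import BaseModel

from mcp_agent.core.fastagent import FastAgent
from mcp_agent.core.prompt import Prompt


class Sentiment(BaseModel):
    # Illustrative output model
    label: str
    confidence: float


fast = FastAgent("structured-demo")


@fast.agent("classifier", model="sonnet")  # decorator arguments are illustrative
async def main() -> None:
    async with fast.run() as app:
        result, assistant_message = await app.classifier.structured(
            [Prompt.user("Classify the sentiment of: 'the release notes look great'")],
            Sentiment,
        )
        if result is not None:
            print(result.label, result.confidence)


if __name__ == "__main__":
    asyncio.run(main())
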
--- a/mcp_agent/llm/providers/augmented_llm_bedrock.py
+++ b/mcp_agent/llm/providers/augmented_llm_bedrock.py
@@ -4,7 +4,7 @@ import re
 from enum import Enum
 from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, Union
 
-from mcp.types import EmbeddedResource, ImageContent, TextContent
+from mcp.types import ContentBlock, TextContent
 from rich.text import Text
 
 from mcp_agent.core.exceptions import ProviderKeyError
@@ -1066,7 +1066,7 @@ class BedrockAugmentedLLM(AugmentedLLM[BedrockMessageParam, BedrockMessage]):
         self,
         message_param: BedrockMessageParam,
         request_params: RequestParams | None = None,
-    ) -> List[TextContent | ImageContent | EmbeddedResource]:
+    ) -> List[ContentBlock | CallToolRequestParams]:
         """
         Process a query using Bedrock and available tools.
         """
@@ -1120,7 +1120,7 @@ class BedrockAugmentedLLM(AugmentedLLM[BedrockMessageParam, BedrockMessage]):
                 f"Model {model_to_check} does not support tool use - skipping tool preparation"
             )
 
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
         model = self.default_request_params.model
 
         for i in range(params.max_iterations):

--- a/mcp_agent/llm/providers/augmented_llm_google_native.py
+++ b/mcp_agent/llm/providers/augmented_llm_google_native.py
@@ -10,8 +10,7 @@ from mcp.types (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
+    ContentBlock,
     TextContent,
 )
 from rich.text import Text
@@ -228,12 +227,12 @@ class GoogleNativeAugmentedLLM(AugmentedLLM[types.Content, types.Content]):
     async def _google_completion(
         self,
         request_params: RequestParams | None = None,
-    ) -> List[TextContent | ImageContent | EmbeddedResource]:
+    ) -> List[ContentBlock]:
         """
         Process a query using Google's generate_content API and available tools.
         """
         request_params = self.get_request_params(request_params=request_params)
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
 
         # Load full conversation history if use_history is true
         if request_params.use_history:
@@ -376,9 +375,7 @@ class GoogleNativeAugmentedLLM(AugmentedLLM[types.Content, types.Content]):
 
                 # Execute the tool call. google.genai does not provide a tool_call_id, pass None.
                 result = await self.call_tool(tool_call_request, None)
-                self.show_oai_tool_result(
-                    str(result.content)
-                )  # Use show_oai_tool_result for consistency
+                self.show_tool_result(result)
 
                 tool_results.append((tool_call_params.name, result))  # Store name and result
 
--- a/mcp_agent/llm/providers/augmented_llm_openai.py
+++ b/mcp_agent/llm/providers/augmented_llm_openai.py
@@ -4,8 +4,7 @@ from mcp.types (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
+    ContentBlock,
     TextContent,
 )
 from openai import AsyncOpenAI, AuthenticationError
@@ -298,7 +297,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         self,
         message: OpenAIMessage,
         request_params: RequestParams | None = None,
-    ) -> List[TextContent | ImageContent | EmbeddedResource]:
+    ) -> List[ContentBlock]:
         """
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
@@ -307,7 +306,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         request_params = self.get_request_params(request_params=request_params)
 
-        responses: List[TextContent | ImageContent | EmbeddedResource] = []
+        responses: List[ContentBlock] = []
 
         # TODO -- move this in to agent context management / agent group handling
         messages: List[ChatCompletionMessageParam] = []
@@ -427,7 +426,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                         ),
                     )
                     result = await self.call_tool(tool_call_request, tool_call.id)
-                    self.show_oai_tool_result(str(result))
+                    self.show_tool_result(result)
 
                     tool_results.append((tool_call.id, result))
                     responses.extend(result.content)
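
Here, and in the Google and TensorZero providers, show_oai_tool_result(str(result)) becomes show_tool_result(result): the display layer now receives the CallToolResult object, so the reworked console display (console_display.py, +363/-142 above) can render individual content blocks rather than a stringified repr. A small sketch of the difference; the rendered strings are illustrative:

from mcp.types import CallToolResult, TextContent

result = CallToolResult(
    isError=False,
    content=[TextContent(type="text", text="42 rows updated")],
)

print(str(result))             # pydantic repr of the whole model, e.g. "meta=None content=[...] isError=False"
print(result.content[0].text)  # what a block-aware display can show: "42 rows updated"
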
@@ -511,9 +510,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         # For assistant messages: Return the last message (no completion needed)
         message_param: OpenAIMessage = OpenAIConverter.convert_to_openai(last_message)
-        responses: List[
-            TextContent | ImageContent | EmbeddedResource
-        ] = await self._openai_completion(
+        responses: List[ContentBlock] = await self._openai_completion(
             message_param,
             request_params,
         )

--- a/mcp_agent/llm/providers/augmented_llm_tensorzero.py
+++ b/mcp_agent/llm/providers/augmented_llm_tensorzero.py
@@ -5,8 +5,7 @@ from mcp.types (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
+    ContentBlock,
     TextContent,
 )
 from tensorzero import AsyncTensorZeroGateway
@@ -169,7 +168,7 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
         available_tools: Optional[List[Dict[str, Any]]] = await self._prepare_t0_tools()
 
         # [3] Initialize storage arrays for the text content of the assistant message reply and, optionally, tool calls and results, and begin inference loop
-        final_assistant_message: List[Union[TextContent, ImageContent, EmbeddedResource]] = []
+        final_assistant_message: List[ContentBlock] = []
         last_executed_results: Optional[List[CallToolResult]] = None
 
         for i in range(merged_params.max_iterations):
@@ -353,11 +352,11 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
         completion: Union[ChatInferenceResponse, JsonInferenceResponse],
         available_tools_for_display: Optional[List[Dict[str, Any]]] = None,
     ) -> Tuple[
-        List[Union[TextContent, ImageContent, EmbeddedResource]],  # Text/Image content ONLY
+        List[Union[ContentBlock]],  # Text/Image content ONLY
         List[CallToolResult],  # Executed results
         List[Any],  # Raw tool_call blocks
     ]:
-        content_parts_this_turn: List[Union[TextContent, ImageContent, EmbeddedResource]] = []
+        content_parts_this_turn: List[ContentBlock] = []
         executed_tool_results: List[CallToolResult] = []
         raw_tool_call_blocks_from_t0: List[Any] = []
 
@@ -402,7 +401,7 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
                             setattr(result, "_t0_tool_name_temp", tool_name)
                             setattr(result, "_t0_is_error_temp", False)
                             executed_tool_results.append(result)
-                            self.show_oai_tool_result(str(result))
+                            self.show_tool_result(result)
                         except Exception as e:
                             self.logger.error(
                                 f"Error executing tool {tool_name} (id: {tool_use_id}): {e}"
@@ -415,7 +414,7 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
                             setattr(error_result, "_t0_tool_name_temp", tool_name)
                             setattr(error_result, "_t0_is_error_temp", True)
                             executed_tool_results.append(error_result)
-                            self.show_oai_tool_result(f"ERROR: {error_text}")
+                            self.show_tool_result(error_result)
 
                         elif block_type == "thought":
                             thought_text = getattr(block, "text", None)
--- a/mcp_agent/llm/providers/google_converter.py
+++ b/mcp_agent/llm/providers/google_converter.py
@@ -8,6 +8,7 @@ from mcp.types (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
+    ContentBlock,
     EmbeddedResource,
     ImageContent,
     TextContent,
@@ -158,17 +159,15 @@ class GoogleConverter:
 
     def convert_from_google_content(
         self, content: types.Content
-    ) -> List[TextContent | ImageContent | EmbeddedResource | CallToolRequestParams]:
+    ) -> List[ContentBlock | CallToolRequestParams]:
         """
         Converts google.genai types.Content from a model response to a list of
         fast-agent content types or tool call requests.
         """
-        fast_agent_parts: List[
-            TextContent | ImageContent | EmbeddedResource | CallToolRequestParams
-        ] = []
+        fast_agent_parts: List[ContentBlock | CallToolRequestParams] = []
 
-        if content is None or not hasattr(content, 'parts') or content.parts is None:
-            return [] # Google API response 'content' object is None. Cannot extract parts.
+        if content is None or not hasattr(content, "parts") or content.parts is None:
+            return []  # Google API response 'content' object is None. Cannot extract parts.
 
         for part in content.parts:
             if part.text:
@@ -340,9 +339,7 @@ class GoogleConverter:
         if content.role == "model" and any(part.function_call for part in content.parts):
             return PromptMessageMultipart(role="assistant", content=[])
 
-        fast_agent_parts: List[
-            TextContent | ImageContent | EmbeddedResource | CallToolRequestParams
-        ] = []
+        fast_agent_parts: List[ContentBlock | CallToolRequestParams] = []
         for part in content.parts:
             if part.text:
                 fast_agent_parts.append(TextContent(type="text", text=part.text))
--- a/mcp_agent/llm/providers/multipart_converter_anthropic.py
+++ b/mcp_agent/llm/providers/multipart_converter_anthropic.py
@@ -16,6 +16,7 @@ from anthropic.types (
 from mcp.types import (
     BlobResourceContents,
     CallToolResult,
+    ContentBlock,
     EmbeddedResource,
     ImageContent,
     PromptMessage,
@@ -117,7 +118,7 @@ class AnthropicConverter:
 
     @staticmethod
     def _convert_content_items(
-        content_items: Sequence[Union[TextContent, ImageContent, EmbeddedResource]],
+        content_items: Sequence[ContentBlock],
         document_mode: bool = True,
     ) -> List[ContentBlockParam]:
         """
@@ -210,7 +211,7 @@ class AnthropicConverter:
             return ImageBlockParam(
                 type="image", source=URLImageSourceParam(type="url", url=uri_str)
             )
-
+
            # Try to get image data
            image_data = get_image_data(resource)
            if image_data:
@@ -220,7 +221,7 @@ class AnthropicConverter:
                    type="base64", media_type=mime_type, data=image_data
                ),
            )
-
+
            return AnthropicConverter._create_fallback_text("Image missing data", resource)
 
        elif mime_type == "application/pdf":
@@ -450,4 +451,4 @@ class AnthropicConverter:
         # Add separate blocks directly to the message
         content_blocks.extend(separate_blocks)
 
-        return MessageParam(role="user", content=content_blocks)
+        return MessageParam(role="user", content=content_blocks)