fast-agent-mcp 0.2.49__py3-none-any.whl → 0.2.51__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of fast-agent-mcp might be problematic. See the advisory details on the registry page for more information.

Files changed (40)
  1. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/METADATA +4 -4
  2. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/RECORD +38 -21
  3. mcp_agent/cli/commands/quickstart.py +107 -0
  4. mcp_agent/event_progress.py +18 -0
  5. mcp_agent/llm/model_database.py +39 -1
  6. mcp_agent/llm/model_factory.py +5 -3
  7. mcp_agent/llm/providers/augmented_llm_aliyun.py +7 -8
  8. mcp_agent/llm/providers/augmented_llm_deepseek.py +7 -8
  9. mcp_agent/llm/providers/augmented_llm_groq.py +80 -7
  10. mcp_agent/llm/providers/augmented_llm_openai.py +18 -7
  11. mcp_agent/llm/providers/augmented_llm_openrouter.py +10 -15
  12. mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py +127 -0
  13. mcp_agent/llm/providers/augmented_llm_xai.py +8 -8
  14. mcp_agent/llm/providers/google_converter.py +4 -0
  15. mcp_agent/logging/rich_progress.py +30 -7
  16. mcp_agent/mcp/helpers/content_helpers.py +29 -0
  17. mcp_agent/mcp/mcp_aggregator.py +32 -1
  18. mcp_agent/resources/examples/tensorzero/.env.sample +2 -0
  19. mcp_agent/resources/examples/tensorzero/Makefile +31 -0
  20. mcp_agent/resources/examples/tensorzero/README.md +55 -0
  21. mcp_agent/resources/examples/tensorzero/agent.py +35 -0
  22. mcp_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
  23. mcp_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
  24. mcp_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
  25. mcp_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
  26. mcp_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
  27. mcp_agent/resources/examples/tensorzero/image_demo.py +67 -0
  28. mcp_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
  29. mcp_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
  30. mcp_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
  31. mcp_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
  32. mcp_agent/resources/examples/tensorzero/simple_agent.py +25 -0
  33. mcp_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
  34. mcp_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
  35. mcp_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
  36. mcp_agent/llm/providers/augmented_llm_tensorzero.py +0 -441
  37. mcp_agent/llm/providers/multipart_converter_tensorzero.py +0 -201
  38. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/WHEEL +0 -0
  39. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/entry_points.txt +0 -0
  40. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/licenses/LICENSE +0 -0
@@ -1,441 +0,0 @@
1
- import json
2
- from typing import Any, Dict, List, Optional, Tuple, Union
3
-
4
- from mcp.types import (
5
- CallToolRequest,
6
- CallToolRequestParams,
7
- CallToolResult,
8
- ContentBlock,
9
- TextContent,
10
- )
11
- from tensorzero import AsyncTensorZeroGateway
12
- from tensorzero.types import (
13
- ChatInferenceResponse,
14
- JsonInferenceResponse,
15
- TensorZeroError,
16
- )
17
-
18
- from mcp_agent.agents.agent import Agent
19
- from mcp_agent.core.exceptions import ModelConfigError
20
- from mcp_agent.core.request_params import RequestParams
21
- from mcp_agent.llm.augmented_llm import AugmentedLLM
22
- from mcp_agent.llm.memory import Memory, SimpleMemory
23
- from mcp_agent.llm.provider_types import Provider
24
- from mcp_agent.llm.providers.multipart_converter_tensorzero import TensorZeroConverter
25
- from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
26
-
27
-
28
- class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
29
- """
30
- AugmentedLLM implementation for TensorZero using its native API.
31
- Uses the Converter pattern for message formatting.
32
- Implements multi-turn tool calling logic, storing API dicts in history.
33
- """
34
-
35
- def __init__(
36
- self,
37
- agent: Agent,
38
- model: str,
39
- request_params: Optional[RequestParams] = None,
40
- **kwargs: Any,
41
- ):
42
- self._t0_gateway: Optional[AsyncTensorZeroGateway] = None
43
- self._t0_function_name: str = model
44
- self._t0_episode_id: Optional[str] = kwargs.get("episode_id")
45
-
46
- super().__init__(
47
- agent=agent,
48
- model=model,
49
- provider=Provider.TENSORZERO,
50
- request_params=request_params,
51
- **kwargs,
52
- )
53
-
54
- self.history: Memory[Dict[str, Any]] = SimpleMemory[Dict[str, Any]]()
55
-
56
- self.logger.info(
57
- f"TensorZero LLM provider initialized for function '{self._t0_function_name}'. History type: {type(self.history)}"
58
- )
59
-
60
- @staticmethod
61
- def block_to_dict(block: Any) -> Dict[str, Any]:
62
- if hasattr(block, "model_dump"):
63
- try:
64
- dumped = block.model_dump(mode="json")
65
- if dumped:
66
- return dumped
67
- except Exception:
68
- pass
69
- if hasattr(block, "__dict__"):
70
- try:
71
- block_vars = vars(block)
72
- if block_vars:
73
- return block_vars
74
- except Exception:
75
- pass
76
- if isinstance(block, (str, int, float, bool, list, dict, type(None))):
77
- return {"type": "raw", "content": block}
78
-
79
- # Basic attribute extraction as fallback
80
- d = {"type": getattr(block, "type", "unknown")}
81
- for attr in ["id", "name", "text", "arguments"]:
82
- if hasattr(block, attr):
83
- d[attr] = getattr(block, attr)
84
- if len(d) == 1 and d.get("type") == "unknown":
85
- d["content"] = str(block)
86
- return d
87
-
88
- def _initialize_default_params(self, kwargs: dict) -> RequestParams:
89
- func_name = kwargs.get("model", self._t0_function_name or "unknown_t0_function")
90
- return RequestParams(
91
- model=func_name,
92
- systemPrompt=self.instruction,
93
- maxTokens=4096,
94
- use_history=True,
95
- max_iterations=20, # Max iterations for tool use loop
96
- parallel_tool_calls=True,
97
- )
98
-
99
- async def _initialize_gateway(self) -> AsyncTensorZeroGateway:
100
- if self._t0_gateway is None:
101
- self.logger.debug("Initializing AsyncTensorZeroGateway client...")
102
- try:
103
- base_url: Optional[str] = None
104
- default_url = "http://localhost:3000"
105
-
106
- if (
107
- self.context
108
- and self.context.config
109
- and hasattr(self.context.config, "tensorzero")
110
- and self.context.config.tensorzero
111
- ):
112
- base_url = getattr(self.context.config.tensorzero, "base_url", None)
113
-
114
- if not base_url:
115
- if not self.context:
116
- # Handle case where context itself is missing, log and use default
117
- self.logger.warning(
118
- f"LLM context not found. Cannot read TensorZero Gateway base URL configuration. "
119
- f"Using default: {default_url}"
120
- )
121
- else:
122
- self.logger.warning(
123
- f"TensorZero Gateway base URL not configured in context.config.tensorzero.base_url. "
124
- f"Using default: {default_url}"
125
- )
126
-
127
- base_url = default_url
128
-
129
- self._t0_gateway = await AsyncTensorZeroGateway.build_http(gateway_url=base_url) # type: ignore
130
- self.logger.info(f"TensorZero Gateway client initialized for URL: {base_url}")
131
- except Exception as e:
132
- self.logger.error(f"Failed to initialize TensorZero Gateway: {e}")
133
- raise ModelConfigError(f"Failed to initialize TensorZero Gateway lazily: {e}")
134
-
135
- return self._t0_gateway
136
-
137
- async def _apply_prompt_provider_specific(
138
- self,
139
- multipart_messages: List[PromptMessageMultipart],
140
- request_params: Optional[RequestParams] = None,
141
- is_template: bool = False,
142
- ) -> PromptMessageMultipart:
143
- gateway = await self._initialize_gateway()
144
- merged_params = self.get_request_params(request_params)
145
-
146
- # [1] Retrieve history
147
- current_api_messages: List[Dict[str, Any]] = []
148
- if merged_params.use_history:
149
- try:
150
- current_api_messages = self.history.get() or []
151
- self.logger.debug(
152
- f"Retrieved {len(current_api_messages)} API dict messages from history."
153
- )
154
- except Exception as e:
155
- self.logger.error(f"Error retrieving history: {e}")
156
-
157
- # [2] Convert *new* incoming PromptMessageMultipart messages to API dicts
158
- for msg in multipart_messages:
159
- msg_dict = TensorZeroConverter.convert_mcp_to_t0_message(msg)
160
- if msg_dict:
161
- current_api_messages.append(msg_dict)
162
-
163
- t0_system_vars = self._prepare_t0_system_params(merged_params)
164
- if t0_system_vars:
165
- t0_api_input_dict = {"system": t0_system_vars}
166
- else:
167
- t0_api_input_dict = {}
168
- available_tools: Optional[List[Dict[str, Any]]] = await self._prepare_t0_tools()
169
-
170
- # [3] Initialize storage arrays for the text content of the assistant message reply and, optionally, tool calls and results, and begin inference loop
171
- final_assistant_message: List[ContentBlock] = []
172
- last_executed_results: Optional[List[CallToolResult]] = None
173
-
174
- for i in range(merged_params.max_iterations):
175
- use_parallel_calls = merged_params.parallel_tool_calls if available_tools else False
176
- current_t0_episode_id = self._t0_episode_id
177
-
178
- try:
179
- self.logger.debug(
180
- f"Calling TensorZero inference (Iteration {i + 1}/{merged_params.max_iterations})..."
181
- )
182
- t0_api_input_dict["messages"] = current_api_messages # type: ignore
183
-
184
- # [4] Call the TensorZero inference API
185
- response_iter_or_completion = await gateway.inference(
186
- function_name=self._t0_function_name,
187
- input=t0_api_input_dict,
188
- additional_tools=available_tools,
189
- parallel_tool_calls=use_parallel_calls,
190
- stream=False,
191
- episode_id=current_t0_episode_id,
192
- )
193
-
194
- if not isinstance(
195
- response_iter_or_completion, (ChatInferenceResponse, JsonInferenceResponse)
196
- ):
197
- self.logger.error(
198
- f"Unexpected TensorZero response type: {type(response_iter_or_completion)}"
199
- )
200
- final_assistant_message = [
201
- TextContent(type="text", text="Unexpected response type")
202
- ]
203
- break # Exit loop
204
-
205
- # [5] quick check to confirm that episode_id is present and being used correctly by TensorZero
206
- completion = response_iter_or_completion
207
- if completion.episode_id: #
208
- self._t0_episode_id = str(completion.episode_id)
209
- if (
210
- self._t0_episode_id != current_t0_episode_id
211
- and current_t0_episode_id is not None
212
- ):
213
- raise Exception(
214
- f"Episode ID mismatch: {self._t0_episode_id} != {current_t0_episode_id}"
215
- )
216
-
217
- # [6] Adapt TensorZero inference response to a format compatible with the broader framework
218
- (
219
- content_parts_this_turn, # Text/Image content ONLY
220
- executed_results_this_iter, # Results from THIS iteration
221
- raw_tool_call_blocks,
222
- ) = await self._adapt_t0_native_completion(completion, available_tools)
223
-
224
- last_executed_results = (
225
- executed_results_this_iter # Track results from this iteration
226
- )
227
-
228
- # [7] If a text message was returned from the assistant, format that message using the multipart_converter_tensorzero.py helper methods and add this to the current list of API messages
229
- assistant_api_content = []
230
- for part in content_parts_this_turn:
231
- api_part = TensorZeroConverter._convert_content_part(part)
232
- if api_part:
233
- assistant_api_content.append(api_part)
234
- if raw_tool_call_blocks:
235
- assistant_api_content.extend(
236
- [self.block_to_dict(b) for b in raw_tool_call_blocks]
237
- )
238
-
239
- if assistant_api_content:
240
- assistant_api_message_dict = {
241
- "role": "assistant",
242
- "content": assistant_api_content,
243
- }
244
- current_api_messages.append(assistant_api_message_dict)
245
- elif executed_results_this_iter:
246
- self.logger.debug(
247
- "Assistant turn contained only tool calls, no API message added."
248
- )
249
-
250
- final_assistant_message = content_parts_this_turn
251
-
252
- # [8] If there were no tool calls we're done. If not, format the tool results and add them to the current list of API messages
253
- if not executed_results_this_iter:
254
- self.logger.debug(f"Iteration {i + 1}: No tool calls detected. Finishing loop.")
255
- break
256
- else:
257
- user_message_with_results = (
258
- TensorZeroConverter.convert_tool_results_to_t0_user_message(
259
- executed_results_this_iter
260
- )
261
- )
262
- if user_message_with_results:
263
- current_api_messages.append(user_message_with_results)
264
- else:
265
- self.logger.error("Converter failed to format tool results, breaking loop.")
266
- break
267
-
268
- # Check max iterations: TODO: implement logic in the future to handle this dynamically, checking for the presence of a tool call in the last iteration
269
- if i == merged_params.max_iterations - 1:
270
- self.logger.warning(f"Max iterations ({merged_params.max_iterations}) reached.")
271
- break
272
-
273
- # --- Error Handling for Inference Call ---
274
- except TensorZeroError as e:
275
- error_details = getattr(e, "detail", str(e.args[0] if e.args else e))
276
- self.logger.error(f"TensorZero Error (HTTP {e.status_code}): {error_details}")
277
- error_content = TextContent(type="text", text=f"TensorZero Error: {error_details}")
278
- return PromptMessageMultipart(role="assistant", content=[error_content])
279
- except Exception as e:
280
- import traceback
281
-
282
- self.logger.error(f"Unexpected Error: {e}\n{traceback.format_exc()}")
283
- error_content = TextContent(type="text", text=f"Unexpected error: {e}")
284
- return PromptMessageMultipart(role="assistant", content=[error_content])
285
-
286
- # [9] Construct the final assistant message and update history
287
- final_message_to_return = PromptMessageMultipart(
288
- role="assistant", content=final_assistant_message
289
- )
290
-
291
- if merged_params.use_history:
292
- try:
293
- # Store the final list of API DICTIONARIES in history
294
- self.history.set(current_api_messages)
295
- self.logger.debug(
296
- f"Updated self.history with {len(current_api_messages)} API message dicts."
297
- )
298
- except Exception as e:
299
- self.logger.error(f"Failed to update self.history after loop: {e}")
300
-
301
- # [10] Post final assistant message to display
302
- display_text = final_message_to_return.all_text()
303
- if display_text and display_text != "<no text>":
304
- title = f"ASSISTANT/{self._t0_function_name}"
305
- await self.show_assistant_message(message_text=display_text, title=title)
306
-
307
- elif not final_assistant_message and last_executed_results:
308
- self.logger.debug("Final assistant turn involved only tool calls, no text to display.")
309
-
310
- return final_message_to_return
311
-
312
- def _prepare_t0_system_params(self, merged_params: RequestParams) -> Dict[str, Any]:
313
- """Prepares the 'system' dictionary part of the main input."""
314
- t0_func_input = merged_params.template_vars.copy()
315
-
316
- metadata_args = None
317
- if merged_params.metadata and isinstance(merged_params.metadata, dict):
318
- metadata_args = merged_params.metadata.get("tensorzero_arguments")
319
- if isinstance(metadata_args, dict):
320
- t0_func_input.update(metadata_args)
321
- self.logger.debug(f"Merged tensorzero_arguments from metadata: {metadata_args}")
322
- return t0_func_input
323
-
324
- async def _prepare_t0_tools(self) -> Optional[List[Dict[str, Any]]]:
325
- """Fetches and formats tools for the additional_tools parameter."""
326
- formatted_tools: List[Dict[str, Any]] = []
327
- try:
328
- tools_response = await self.aggregator.list_tools()
329
- if tools_response and hasattr(tools_response, "tools") and tools_response.tools:
330
- for mcp_tool in tools_response.tools:
331
- if (
332
- not isinstance(mcp_tool.inputSchema, dict)
333
- or mcp_tool.inputSchema.get("type") != "object"
334
- ):
335
- self.logger.warning(
336
- f"Tool '{mcp_tool.name}' has invalid parameters schema. Skipping."
337
- )
338
- continue
339
- t0_tool_dict = {
340
- "name": mcp_tool.name,
341
- "description": mcp_tool.description if mcp_tool.description else "",
342
- "parameters": mcp_tool.inputSchema,
343
- }
344
- formatted_tools.append(t0_tool_dict)
345
- return formatted_tools if formatted_tools else None
346
- except Exception as e:
347
- self.logger.error(f"Failed to fetch or format tools: {e}")
348
- return None
349
-
350
- async def _adapt_t0_native_completion(
351
- self,
352
- completion: Union[ChatInferenceResponse, JsonInferenceResponse],
353
- available_tools_for_display: Optional[List[Dict[str, Any]]] = None,
354
- ) -> Tuple[
355
- List[Union[ContentBlock]], # Text/Image content ONLY
356
- List[CallToolResult], # Executed results
357
- List[Any], # Raw tool_call blocks
358
- ]:
359
- content_parts_this_turn: List[ContentBlock] = []
360
- executed_tool_results: List[CallToolResult] = []
361
- raw_tool_call_blocks_from_t0: List[Any] = []
362
-
363
- if isinstance(completion, ChatInferenceResponse) and hasattr(completion, "content"):
364
- for block in completion.content:
365
- block_type = getattr(block, "type", "UNKNOWN")
366
-
367
- if block_type == "text":
368
- text_val = getattr(block, "text", None)
369
- if text_val is not None:
370
- content_parts_this_turn.append(TextContent(type="text", text=text_val))
371
-
372
- elif block_type == "tool_call":
373
- raw_tool_call_blocks_from_t0.append(block)
374
- tool_use_id = getattr(block, "id", None)
375
- tool_name = getattr(block, "name", None)
376
- tool_input_raw = getattr(block, "arguments", None)
377
- tool_input = {}
378
- if isinstance(tool_input_raw, dict):
379
- tool_input = tool_input_raw
380
- elif isinstance(tool_input_raw, str):
381
- try:
382
- tool_input = json.loads(tool_input_raw)
383
- except json.JSONDecodeError:
384
- tool_input = {}
385
- elif tool_input_raw is not None:
386
- tool_input = {}
387
-
388
- if tool_use_id and tool_name:
389
- self.show_tool_call(
390
- available_tools_for_display, tool_name, json.dumps(tool_input)
391
- )
392
- mcp_tool_request = CallToolRequest(
393
- method="tools/call",
394
- params=CallToolRequestParams(name=tool_name, arguments=tool_input),
395
- )
396
- try:
397
- result: CallToolResult = await self.call_tool(
398
- mcp_tool_request, tool_use_id
399
- )
400
- setattr(result, "_t0_tool_use_id_temp", tool_use_id)
401
- setattr(result, "_t0_tool_name_temp", tool_name)
402
- setattr(result, "_t0_is_error_temp", False)
403
- executed_tool_results.append(result)
404
- self.show_tool_result(result)
405
- except Exception as e:
406
- self.logger.error(
407
- f"Error executing tool {tool_name} (id: {tool_use_id}): {e}"
408
- )
409
- error_text = f"Error executing tool {tool_name}: {str(e)}"
410
- error_result = CallToolResult(
411
- isError=True, content=[TextContent(type="text", text=error_text)]
412
- )
413
- setattr(error_result, "_t0_tool_use_id_temp", tool_use_id)
414
- setattr(error_result, "_t0_tool_name_temp", tool_name)
415
- setattr(error_result, "_t0_is_error_temp", True)
416
- executed_tool_results.append(error_result)
417
- self.show_tool_result(error_result)
418
-
419
- elif block_type == "thought":
420
- thought_text = getattr(block, "text", None)
421
- self.logger.debug(f"TensorZero thought: {thought_text}")
422
- else:
423
- self.logger.warning(
424
- f"TensorZero Adapt: Skipping unknown block type: {block_type}"
425
- )
426
-
427
- elif isinstance(completion, JsonInferenceResponse):
428
- # `completion.output.raw` should always be present unless the LLM provider returns unexpected data
429
- if completion.output.raw:
430
- content_parts_this_turn.append(TextContent(type="text", text=completion.output.raw))
431
-
432
- return content_parts_this_turn, executed_tool_results, raw_tool_call_blocks_from_t0
433
-
434
- async def shutdown(self):
435
- """Close the TensorZero gateway client if initialized."""
436
- if self._t0_gateway:
437
- try:
438
- await self._t0_gateway.close()
439
- self.logger.debug("TensorZero Gateway client closed.")
440
- except Exception as e:
441
- self.logger.error(f"Error closing TensorZero Gateway client: {e}")
@@ -1,201 +0,0 @@
1
- import json
2
- from typing import Any, Dict, List, Optional
3
-
4
- from mcp.types import (
5
- CallToolResult,
6
- ContentBlock,
7
- EmbeddedResource,
8
- ImageContent,
9
- TextContent,
10
- )
11
-
12
- from mcp_agent.logging.logger import get_logger
13
- from mcp_agent.mcp.helpers.content_helpers import (
14
- get_text,
15
- )
16
- from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
17
-
18
- _logger = get_logger(__name__)
19
-
20
-
21
- class TensorZeroConverter:
22
- """Converts MCP message types to/from TensorZero API format."""
23
-
24
- @staticmethod
25
- def _convert_content_part(
26
- part: ContentBlock,
27
- ) -> Optional[Dict[str, Any]]:
28
- """Converts a single MCP content part to a T0 content block dictionary."""
29
- if isinstance(part, TextContent):
30
- text = get_text(part)
31
- if text is not None:
32
- return {"type": "text", "text": text}
33
- elif isinstance(part, ImageContent):
34
- # Handle Base64: needs data, mimeType (and mimeType must not be empty)
35
- if hasattr(part, "data") and part.data and hasattr(part, "mimeType") and part.mimeType:
36
- _logger.debug(
37
- f"Converting ImageContent as base64 for T0 native: mime={part.mimeType}, data_len={len(part.data) if isinstance(part.data, str) else 'N/A'}"
38
- )
39
- supported_mime_types = ["image/jpeg", "image/png", "image/webp"]
40
- mime_type = getattr(part, "mimeType", "")
41
-
42
- # Use the provided mime_type if supported, otherwise default to png
43
- if mime_type not in supported_mime_types:
44
- _logger.warning(
45
- f"Unsupported mimeType '{mime_type}' for T0 base64 image, defaulting to image/png."
46
- )
47
- mime_type = "image/png"
48
-
49
- return {
50
- "type": "image",
51
- "mime_type": mime_type, # Note: T0 uses mime_type, not media_type
52
- "data": getattr(part, "data", ""), # Data is direct property
53
- }
54
- else:
55
- # Log cases where it's an ImageContent but doesn't fit Base64 criteria
56
- _logger.warning(
57
- f"Skipping ImageContent: Missing required base64 fields (mimeType/data), or mimeType is empty: {part}"
58
- )
59
-
60
- elif isinstance(part, EmbeddedResource):
61
- _logger.warning(f"Skipping EmbeddedResource, T0 conversion not implemented: {part}")
62
- else:
63
- _logger.error(
64
- f"Unsupported content part type for T0 conversion: {type(part)}"
65
- ) # Changed to error
66
-
67
- return None # Return None if no block was successfully created
68
-
69
- @staticmethod
70
- def _get_text_from_call_tool_result(result: CallToolResult) -> str:
71
- """Helper to extract combined text from a CallToolResult's content list."""
72
- texts = []
73
- if result.content:
74
- for part in result.content:
75
- text = get_text(part)
76
- if text:
77
- texts.append(text)
78
- return "\n".join(texts)
79
-
80
- @staticmethod
81
- def convert_tool_results_to_t0_user_message(
82
- results: List[CallToolResult],
83
- ) -> Optional[Dict[str, Any]]:
84
- """Formats CallToolResult list into T0's tool_result blocks within a user message dict."""
85
- t0_tool_result_blocks = []
86
- for result in results:
87
- tool_use_id = getattr(result, "_t0_tool_use_id_temp", None)
88
- tool_name = getattr(result, "_t0_tool_name_temp", None)
89
-
90
- if tool_use_id and tool_name:
91
- result_content_str = TensorZeroConverter._get_text_from_call_tool_result(result)
92
- try:
93
- # Attempt to treat result as JSON if possible, else use raw string
94
- try:
95
- json_result = json.loads(result_content_str)
96
- except json.JSONDecodeError:
97
- json_result = result_content_str # Fallback to string if not valid JSON
98
- except Exception as e:
99
- _logger.error(f"Unexpected error processing tool result content: {e}")
100
- json_result = str(result_content_str) # Safest fallback
101
-
102
- t0_block = {
103
- "type": "tool_result",
104
- "id": tool_use_id,
105
- "name": tool_name,
106
- "result": json_result, # T0 expects the result directly
107
- }
108
- t0_tool_result_blocks.append(t0_block)
109
-
110
- # Clean up temporary attributes
111
- try:
112
- delattr(result, "_t0_tool_use_id_temp")
113
- delattr(result, "_t0_tool_name_temp")
114
- if hasattr(result, "_t0_is_error_temp"):
115
- delattr(result, "_t0_is_error_temp")
116
- except AttributeError:
117
- pass
118
- else:
119
- _logger.warning(
120
- f"Could not find id/name temp attributes for CallToolResult: {result}"
121
- )
122
-
123
- if not t0_tool_result_blocks:
124
- return None
125
-
126
- return {"role": "user", "content": t0_tool_result_blocks}
127
-
128
- @staticmethod
129
- def convert_mcp_to_t0_message(msg: PromptMessageMultipart) -> Optional[Dict[str, Any]]:
130
- """
131
- Converts a single PromptMessageMultipart to a T0 API message dictionary.
132
- Handles Text, Image, and embedded CallToolResult content.
133
- Skips system messages.
134
- """
135
- if msg.role == "system":
136
- return None
137
-
138
- t0_content_blocks = []
139
- contains_tool_result = False
140
-
141
- for part in msg.content:
142
- # Use the corrected _convert_content_part
143
- converted_block = TensorZeroConverter._convert_content_part(part)
144
- if converted_block:
145
- t0_content_blocks.append(converted_block)
146
- elif isinstance(part, CallToolResult):
147
- # Existing logic for handling embedded CallToolResult (seems compatible with T0 tool_result spec)
148
- contains_tool_result = True
149
- tool_use_id = getattr(part, "_t0_tool_use_id_temp", None)
150
- tool_name = getattr(part, "_t0_tool_name_temp", None)
151
- if tool_use_id and tool_name:
152
- result_content_str = TensorZeroConverter._get_text_from_call_tool_result(part)
153
- # Try to format result as JSON object/string
154
- try:
155
- json_result = json.loads(result_content_str)
156
- except json.JSONDecodeError:
157
- json_result = result_content_str # Fallback
158
- except Exception as e:
159
- _logger.error(f"Error processing embedded tool result: {e}")
160
- json_result = str(result_content_str)
161
-
162
- t0_content_blocks.append(
163
- {
164
- "type": "tool_result",
165
- "id": tool_use_id,
166
- "name": tool_name,
167
- "result": json_result,
168
- }
169
- )
170
- # Clean up temp attributes
171
- try:
172
- delattr(part, "_t0_tool_use_id_temp")
173
- delattr(part, "_t0_tool_name_temp")
174
- except AttributeError:
175
- pass
176
- else:
177
- _logger.warning(
178
- f"Found embedded CallToolResult without required temp attributes: {part}"
179
- )
180
- # Note: The _convert_content_part handles logging for other skipped/unsupported types
181
-
182
- if not t0_content_blocks:
183
- return None
184
-
185
- # Determine role - logic remains the same
186
- valid_role = msg.role if msg.role in ["user", "assistant"] else "user"
187
- if contains_tool_result and all(
188
- block.get("type") == "tool_result" for block in t0_content_blocks
189
- ):
190
- final_role = "user"
191
- if valid_role != final_role:
192
- _logger.debug(f"Overriding role to '{final_role}' for tool result message.")
193
- else:
194
- final_role = valid_role
195
- if valid_role != msg.role:
196
- _logger.warning(f"Mapping message role '{msg.role}' to '{valid_role}' for T0.")
197
-
198
- return {"role": final_role, "content": t0_content_blocks}
199
-
200
- # Add methods here if needed to convert *from* T0 format back to MCP types
201
- # e.g., adapt_t0_response_to_mcp(...) - this logic stays in the LLM class for now