xgae 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of xgae has been flagged as potentially problematic.

@@ -0,0 +1,751 @@
+ import asyncio
+ import json
+ import logging
+ from dataclasses import dataclass
+ from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable, TypedDict, AsyncGenerator
+ from abc import ABC, abstractmethod
+
+ from xgae.engine.xga_base import XGAToolResult, XGAContextMsg, XGAToolBox
+ # from xgae.utils.setup_env import langfuse
+ from xgae.utils.json_helpers import (
+     safe_json_parse,
+     format_for_yield
+ )
+ from xgae.utils.xml_tool_parser import XMLToolParser
+
+ # Type alias for XML result adding strategy
+ XmlAddingStrategy = Literal["user_message", "assistant_message", "inline_edit"]
+
+ # Type alias for tool execution strategy
+ ToolExecutionStrategy = Literal["sequential", "parallel"]
+
+ class TaskResponseContext(TypedDict, total=False):
+     is_stream: bool
+     task_id: str
+     task_run_id: str
+     trace_id: str
+     model_name: str
+     max_xml_tool_calls: int
+     add_context_msg: Callable
+     tool_box: XGAToolBox
+     tool_execution_strategy: Literal["sequential", "parallel"]
+     xml_adding_strategy: Literal["user_message", "assistant_message", "inline_edit"]
+
+
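# --- Editorial sketch, not part of the package source: a minimal
# --- TaskResponseContext as described by the TypedDict above. The stub
# --- add-message callable is a placeholder; a real caller would also pass
# --- an XGAToolBox instance under "tool_box".
def _stub_add_context_msg(**kwargs):
    return {"message_id": "msg-1", **kwargs}

example_context: TaskResponseContext = {
    "is_stream": True,
    "task_id": "task-123",
    "task_run_id": "run-456",
    "model_name": "example-model",
    "add_context_msg": _stub_add_context_msg,
    "tool_execution_strategy": "sequential",
    "xml_adding_strategy": "assistant_message",
}
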
+ class TaskRunContinuousState(TypedDict, total=False):
+     accumulated_content: str
+     auto_continue_count: int
+     auto_continue: bool
+
+ class Span:
+     def end(self, **kwargs):
+         pass
+
+ class Trace:
+     def event(self, **kwargs):
+         pass
+
+     def span(self, **kwargs):
+         return Span()
+
+
+
+ @dataclass
+ class ToolExecutionContext:
+     """Context for a tool execution including call details, result, and display info."""
+     tool_call: Dict[str, Any]
+     tool_index: int
+     result: Optional[XGAToolResult] = None
+     function_name: Optional[str] = None
+     xml_tag_name: Optional[str] = None
+     error: Optional[Exception] = None
+     assistant_message_id: Optional[str] = None
+     parsing_details: Optional[Dict[str, Any]] = None
+
+
+ @dataclass
+ class ProcessorConfig:
+     """
+     Configuration for response processing and tool execution.
+
+     This class controls how the LLM's responses are processed, including how tool calls
+     are detected, executed, and their results handled.
+
+     Attributes:
+         xml_tool_calling: Enable XML-based tool call detection (<tool>...</tool>)
+         native_tool_calling: Enable OpenAI-style function calling format
+         execute_tools: Whether to automatically execute detected tool calls
+         execute_on_stream: For streaming, execute tools as they appear vs. at the end
+         tool_execution_strategy: How to execute multiple tools ("sequential" or "parallel")
+         xml_adding_strategy: How to add XML tool results to the conversation
+         max_xml_tool_calls: Maximum number of XML tool calls to process (0 = no limit)
+     """
+
+     xml_tool_calling: bool = True
+     native_tool_calling: bool = False
+
+     execute_tools: bool = True
+     execute_on_stream: bool = False
+     tool_execution_strategy: ToolExecutionStrategy = "sequential"
+     xml_adding_strategy: XmlAddingStrategy = "assistant_message"
+     max_xml_tool_calls: int = 0  # 0 means no limit
+
+     def __post_init__(self):
+         """Validate configuration after initialization."""
+         if self.xml_tool_calling is False and self.native_tool_calling is False and self.execute_tools:
+             raise ValueError(
+                 "At least one tool calling format (XML or native) must be enabled if execute_tools is True")
+
+         if self.xml_adding_strategy not in ["user_message", "assistant_message", "inline_edit"]:
+             raise ValueError("xml_adding_strategy must be 'user_message', 'assistant_message', or 'inline_edit'")
+
+         if self.max_xml_tool_calls < 0:
+             raise ValueError("max_xml_tool_calls must be a non-negative integer (0 = no limit)")
+
+
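# --- Editorial sketch, not part of the package source: ProcessorConfig only
# --- uses the fields defined above, and __post_init__ enforces the invariants.
streaming_config = ProcessorConfig(
    xml_tool_calling=True,
    execute_on_stream=True,
    tool_execution_strategy="parallel",
    max_xml_tool_calls=5,
)

try:
    # Rejected: execute_tools defaults to True, so at least one tool calling
    # format (XML or native) must stay enabled.
    ProcessorConfig(xml_tool_calling=False, native_tool_calling=False)
except ValueError as exc:
    print(exc)
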
+ class TaskResponseProcessor(ABC):
+     def __init__(self, response_context: TaskResponseContext):
+         self.response_context = response_context
+
+         self.task_id = response_context.get("task_id", "")
+         self.task_run_id = response_context.get("task_run_id", "")
+
+         self.trace = Trace()
+
+         self.add_message = response_context.get("add_context_msg")
+         self._add_message_with_agent_info = self.add_message
+         self.tool_box = response_context.get("tool_box")
+
+         # Initialize the XML parser
+         self.xml_parser = XMLToolParser()
+
+     @abstractmethod
+     async def process_response(self,
+                                llm_response: AsyncGenerator,
+                                prompt_messages: List[Dict[str, Any]],
+                                continuous_state: Optional[TaskRunContinuousState] = None
+                                ) -> AsyncGenerator[Dict[str, Any], None]:
+         pass
+
+     async def _yield_message(self, message_obj: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+         """Helper to yield a message with proper formatting.
+
+         Ensures that content and metadata are JSON strings for client compatibility.
+         """
+         if message_obj:
+             return format_for_yield(message_obj)
+         return None
+
+
+
+     def _extract_xml_chunks(self, content: str) -> List[str]:
+         """Extract complete XML chunks using start and end pattern matching."""
+         chunks = []
+         pos = 0
+
+         try:
+             # First, look for new format <function_calls> blocks
+             start_pattern = '<function_calls>'
+             end_pattern = '</function_calls>'
+
+             while pos < len(content):
+                 # Find the next function_calls block
+                 start_pos = content.find(start_pattern, pos)
+                 if start_pos == -1:
+                     break
+
+                 # Find the matching end tag
+                 end_pos = content.find(end_pattern, start_pos)
+                 if end_pos == -1:
+                     break
+
+                 # Extract the complete block including tags
+                 chunk_end = end_pos + len(end_pattern)
+                 chunk = content[start_pos:chunk_end]
+                 chunks.append(chunk)
+
+                 # Move position past this chunk
+                 pos = chunk_end
+
+             # If no new format found, fall back to old format for backwards compatibility
+             if not chunks:
+                 pos = 0
+                 while pos < len(content):
+                     # Find the next tool tag
+                     next_tag_start = -1
+                     current_tag = None
+
+                     # Find the earliest occurrence of any registered tool function name
+                     # Check for available function names
+                     # available_functions = self.tool_registry.get_available_functions()
+                     available_functions = self.tool_box.get_task_tool_names(self.task_id)
+                     for func_name in available_functions:
+                         # Convert function name to potential tag name (underscore to dash)
+                         tag_name = func_name.replace('_', '-')
+                         start_pattern = f'<{tag_name}'
+                         tag_pos = content.find(start_pattern, pos)
+
+                         if tag_pos != -1 and (next_tag_start == -1 or tag_pos < next_tag_start):
+                             next_tag_start = tag_pos
+                             current_tag = tag_name
+
+                     if next_tag_start == -1 or not current_tag:
+                         break
+
+                     # Find the matching end tag
+                     end_pattern = f'</{current_tag}>'
+                     tag_stack = []
+                     chunk_start = next_tag_start
+                     current_pos = next_tag_start
+
+                     while current_pos < len(content):
+                         # Look for next start or end tag of the same type
+                         next_start = content.find(f'<{current_tag}', current_pos + 1)
+                         next_end = content.find(end_pattern, current_pos)
+
+                         if next_end == -1:  # No closing tag found
+                             break
+
+                         if next_start != -1 and next_start < next_end:
+                             # Found nested start tag
+                             tag_stack.append(next_start)
+                             current_pos = next_start + 1
+                         else:
+                             # Found end tag
+                             if not tag_stack:  # This is our matching end tag
+                                 chunk_end = next_end + len(end_pattern)
+                                 chunk = content[chunk_start:chunk_end]
+                                 chunks.append(chunk)
+                                 pos = chunk_end
+                                 break
+                             else:
+                                 # Pop nested tag
+                                 tag_stack.pop()
+                                 current_pos = next_end + 1
+
+                     if current_pos >= len(content):  # Reached end without finding closing tag
+                         break
+
+                     pos = max(pos + 1, current_pos)
+
+         except Exception as e:
+             logging.error(f"Error extracting XML chunks: {e}")
+             logging.error(f"Content was: {content}")
+             self.trace.event(name="error_extracting_xml_chunks", level="ERROR",
+                              status_message=(f"Error extracting XML chunks: {e}"), metadata={"content": content})
+
+         return chunks
+
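# --- Editorial sketch, not part of the package source: what the extraction
# --- above returns for new-format content. `processor` is assumed to be an
# --- instance of a concrete TaskResponseProcessor subclass.
sample_content = (
    "I'll ask the user first.\n"
    "<function_calls>\n"
    "  <invoke name=\"ask\">\n"
    "    <parameter name=\"text\">Which file should I edit?</parameter>\n"
    "  </invoke>\n"
    "</function_calls>\n"
    "Done."
)
chunks = processor._extract_xml_chunks(sample_content)
# One chunk, spanning from <function_calls> through </function_calls> inclusive.
assert len(chunks) == 1 and chunks[0].startswith("<function_calls>")
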
+     def _parse_xml_tool_call(self, xml_chunk: str) -> Optional[Tuple[Dict[str, Any], Dict[str, Any]]]:
+         """Parse XML chunk into tool call format and return parsing details.
+
+         Returns:
+             Tuple of (tool_call, parsing_details) or None if parsing fails.
+             - tool_call: Dict with 'function_name', 'xml_tag_name', 'arguments'
+             - parsing_details: Dict with 'attributes', 'elements', 'text_content', 'root_content'
+         """
+         try:
+             # Check if this is the new format (contains <function_calls>)
+             if '<function_calls>' in xml_chunk and '<invoke' in xml_chunk:
+                 # Use the new XML parser
+                 parsed_calls = self.xml_parser.parse_content(xml_chunk)
+
+                 if not parsed_calls:
+                     logging.error(f"No tool calls found in XML chunk: {xml_chunk}")
+                     return None
+
+                 # Take the first tool call (should only be one per chunk)
+                 xml_tool_call = parsed_calls[0]
+
+                 # Convert to the expected format
+                 tool_call = {
+                     "function_name": xml_tool_call.function_name,
+                     "xml_tag_name": xml_tool_call.function_name.replace('_', '-'),  # For backwards compatibility
+                     "arguments": xml_tool_call.parameters
+                 }
+
+                 # Include the parsing details
+                 parsing_details = xml_tool_call.parsing_details
+                 parsing_details["raw_xml"] = xml_tool_call.raw_xml
+
+                 logging.debug(f"Parsed new format tool call: {tool_call}")
+                 return tool_call, parsing_details
+
+             # If not the expected <function_calls><invoke> format, return None
+             logging.error(f"XML chunk does not contain expected <function_calls><invoke> format: {xml_chunk}")
+             return None
+
+         except Exception as e:
+             logging.error(f"Error parsing XML chunk: {e}")
+             logging.error(f"XML chunk was: {xml_chunk}")
+             self.trace.event(name="error_parsing_xml_chunk", level="ERROR",
+                              status_message=(f"Error parsing XML chunk: {e}"), metadata={"xml_chunk": xml_chunk})
+             return None
+
+     def _parse_xml_tool_calls(self, content: str) -> List[Dict[str, Any]]:
+         """Parse XML tool calls from content string.
+
+         Returns:
+             List of dictionaries, each containing {'tool_call': ..., 'parsing_details': ...}
+         """
+         parsed_data = []
+
+         try:
+             xml_chunks = self._extract_xml_chunks(content)
+
+             for xml_chunk in xml_chunks:
+                 result = self._parse_xml_tool_call(xml_chunk)
+                 if result:
+                     tool_call, parsing_details = result
+                     parsed_data.append({
+                         "tool_call": tool_call,
+                         "parsing_details": parsing_details
+                     })
+
+         except Exception as e:
+             logging.error(f"Error parsing XML tool calls: {e}", exc_info=True)
+             self.trace.event(name="error_parsing_xml_tool_calls", level="ERROR",
+                              status_message=(f"Error parsing XML tool calls: {e}"), metadata={"content": content})
+
+         return parsed_data
+
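# --- Editorial sketch, not part of the package source: the shape returned by
# --- _parse_xml_tool_calls for the sample content above, assuming XMLToolParser
# --- resolves the <invoke> block. Values are illustrative.
parsed = processor._parse_xml_tool_calls(sample_content)
# parsed ~= [
#     {
#         "tool_call": {
#             "function_name": "ask",
#             "xml_tag_name": "ask",
#             "arguments": {"text": "Which file should I edit?"},
#         },
#         "parsing_details": {..., "raw_xml": '<invoke name="ask">...</invoke>'},
#     }
# ]
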
+     # Tool execution methods
+     async def _execute_tool(self, tool_call: Dict[str, Any]) -> XGAToolResult:
+         """Execute a single tool call and return the result."""
+         span = self.trace.span(name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
+         try:
+             function_name = tool_call["function_name"]
+             arguments = tool_call["arguments"]
+
+             logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
+             self.trace.event(name="executing_tool", level="DEFAULT",
+                              status_message=(f"Executing tool: {function_name} with arguments: {arguments}"))
+
+             if isinstance(arguments, str):
+                 try:
+                     arguments = safe_json_parse(arguments)
+                 except json.JSONDecodeError:
+                     arguments = {"text": arguments}  # @todo modify
+
+             # Get available functions from tool registry
+             # available_functions = self.tool_registry.get_available_functions()
+             result = None
+             available_function_names = self.tool_box.get_task_tool_names(self.task_id)
+             if function_name in available_function_names:
+                 result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
+             else:
+                 logging.error(f"Tool function '{function_name}' not found in registry")
+                 result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
+             logging.info(f"Tool execution complete: {function_name} -> {result}")
+             span.end(status_message="tool_executed", output=result)
+             return result
+         except Exception as e:
+             logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
+             span.end(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
+             return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
+
+     async def _execute_tools(
+             self,
+             tool_calls: List[Dict[str, Any]],
+             execution_strategy: ToolExecutionStrategy = "sequential"
+     ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
+         logging.info(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}")
+         self.trace.event(name="executing_tools_with_strategy", level="DEFAULT",
+                          status_message=(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}"))
+
+         if execution_strategy == "sequential":
+             return await self._execute_tools_sequentially(tool_calls)
+         elif execution_strategy == "parallel":
+             return await self._execute_tools_in_parallel(tool_calls)
+         else:
+             logging.warning(f"Unknown execution strategy: {execution_strategy}, falling back to sequential")
+             return await self._execute_tools_sequentially(tool_calls)
+
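# --- Editorial sketch, not part of the package source: driving the dispatcher
# --- above. The tool names are illustrative and assumed to be registered in the
# --- processor's tool box. Note that the sequential strategy stops early after a
# --- terminating tool ('ask' or 'complete'), while the parallel strategy runs
# --- every call it was given.
async def run_tools_example(processor):
    tool_calls = [
        {"function_name": "read_file", "arguments": {"path": "README.md"}},
        {"function_name": "ask", "arguments": {"text": "Continue?"}},
    ]
    results = await processor._execute_tools(tool_calls, execution_strategy="sequential")
    for call, result in results:
        print(call["function_name"], "->", result.success)
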
+     async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[
+             Tuple[Dict[str, Any], XGAToolResult]]:
+         """Execute tool calls sequentially and return results.
+
+         This method executes tool calls one after another, waiting for each tool to complete
+         before starting the next one. This is useful when tools have dependencies on each other.
+
+         Args:
+             tool_calls: List of tool calls to execute
+
+         Returns:
+             List of tuples containing the original tool call and its result
+         """
+         if not tool_calls:
+             return []
+
+         try:
+             tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
+             logging.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
+             self.trace.event(name="executing_tools_sequentially", level="DEFAULT",
+                              status_message=(f"Executing {len(tool_calls)} tools sequentially: {tool_names}"))
+
+             results = []
+             for index, tool_call in enumerate(tool_calls):
+                 tool_name = tool_call.get('function_name', 'unknown')
+                 logging.debug(f"Executing tool {index + 1}/{len(tool_calls)}: {tool_name}")
+
+                 try:
+                     result = await self._execute_tool(tool_call)
+                     results.append((tool_call, result))
+                     logging.debug(f"Completed tool {tool_name} with success={result.success}")
+
+                     # Check if this is a terminating tool (ask or complete)
+                     if tool_name in ['ask', 'complete']:
+                         logging.info(f"Terminating tool '{tool_name}' executed. Stopping further tool execution.")
+                         self.trace.event(name="terminating_tool_executed", level="DEFAULT", status_message=(
+                             f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
+                         break  # Stop executing remaining tools
+
+                 except Exception as e:
+                     logging.error(f"Error executing tool {tool_name}: {str(e)}")
+                     self.trace.event(name="error_executing_tool", level="ERROR",
+                                      status_message=(f"Error executing tool {tool_name}: {str(e)}"))
+                     error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
+                     results.append((tool_call, error_result))
+
+             logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
+             self.trace.event(name="sequential_execution_completed", level="DEFAULT", status_message=(
+                 f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
+             return results
+
+         except Exception as e:
+             logging.error(f"Error in sequential tool execution: {str(e)}", exc_info=True)
+             # Return partial results plus error results for remaining tools
+             completed_results = results if 'results' in locals() else []
+             completed_tool_names = [r[0].get('function_name', 'unknown') for r in completed_results]
+             remaining_tools = [t for t in tool_calls if t.get('function_name', 'unknown') not in completed_tool_names]
+
+             # Add error results for remaining tools
+             error_results = [(tool, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
+                              for tool in remaining_tools]
+
+             return completed_results + error_results
+
+     async def _execute_tools_in_parallel(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
+         if not tool_calls:
+             return []
+
+         try:
+             tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
+             logging.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
+             self.trace.event(name="executing_tools_in_parallel", level="DEFAULT",
+                              status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
+
+             # Create tasks for all tool calls
+             tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
+
+             # Execute all tasks concurrently with error handling
+             results = await asyncio.gather(*tasks, return_exceptions=True)
+
+             # Process results and handle any exceptions
+             processed_results = []
+             for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
+                 if isinstance(result, Exception):
+                     logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
+                     self.trace.event(name="error_executing_tool", level="ERROR", status_message=(
+                         f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
+                     # Create error result
+                     error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
+                     processed_results.append((tool_call, error_result))
+                 else:
+                     processed_results.append((tool_call, result))
+
+             logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
+             self.trace.event(name="parallel_execution_completed", level="DEFAULT",
+                              status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
+             return processed_results
+
+         except Exception as e:
+             logging.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
+             self.trace.event(name="error_in_parallel_tool_execution", level="ERROR",
+                              status_message=(f"Error in parallel tool execution: {str(e)}"))
+             # Return error results for all tools if the gather itself fails
+             return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
+                     for tool_call in tool_calls]
+
+     def _add_tool_result(
+             self,
+             thread_id: str,
+             tool_call: Dict[str, Any],
+             result: XGAToolResult,
+             strategy: Union[XmlAddingStrategy, str] = "assistant_message",
+             assistant_message_id: Optional[str] = None,
+             parsing_details: Optional[Dict[str, Any]] = None
+     ) -> Optional[Dict[str, Any]]:  # Return the full message object
+         try:
+             message_obj = None  # Initialize message_obj
+
+             # Create metadata with assistant_message_id if provided
+             metadata = {}
+             if assistant_message_id:
+                 metadata["assistant_message_id"] = assistant_message_id
+                 logging.info(f"Linking tool result to assistant message: {assistant_message_id}")
+                 self.trace.event(name="linking_tool_result_to_assistant_message", level="DEFAULT",
+                                  status_message=(f"Linking tool result to assistant message: {assistant_message_id}"))
+
+             # --- Add parsing details to metadata if available ---
+             if parsing_details:
+                 metadata["parsing_details"] = parsing_details
+                 logging.info("Adding parsing_details to tool result metadata")
+                 self.trace.event(name="adding_parsing_details_to_tool_result_metadata", level="DEFAULT",
+                                  status_message=(f"Adding parsing_details to tool result metadata"),
+                                  metadata={"parsing_details": parsing_details})
+             # ---
+
+             # Check if this is a native function call (has id field)
+             if "id" in tool_call:
+                 # Format as a proper tool message according to OpenAI spec
+                 function_name = tool_call.get("function_name", "")
+
+                 # Format the tool result content - tool role needs string content
+                 if isinstance(result, str):
+                     content = result
+                 elif hasattr(result, 'output'):
+                     # If it's an XGAToolResult object
+                     if isinstance(result.output, dict) or isinstance(result.output, list):
+                         # If output is already a dict or list, convert to JSON string
+                         content = json.dumps(result.output)
+                     else:
+                         # Otherwise just use the string representation
+                         content = str(result.output)
+                 else:
+                     # Fallback to string representation of the whole result
+                     content = str(result)
+
+                 logging.info(f"Formatted tool result content: {content[:100]}...")
+                 self.trace.event(name="formatted_tool_result_content", level="DEFAULT",
+                                  status_message=(f"Formatted tool result content: {content[:100]}..."))
+
+                 # Create the tool response message with proper format
+                 tool_message = {
+                     "role": "tool",
+                     "tool_call_id": tool_call["id"],
+                     "name": function_name,
+                     "content": content
+                 }
+
+                 logging.info(f"Adding native tool result for tool_call_id={tool_call['id']} with role=tool")
+                 self.trace.event(name="adding_native_tool_result_for_tool_call_id", level="DEFAULT", status_message=(
+                     f"Adding native tool result for tool_call_id={tool_call['id']} with role=tool"))
+
+                 # Add as a tool message to the conversation history
+                 # This makes the result visible to the LLM in the next turn
+                 message_obj = self.add_message(
+                     type="tool",  # Special type for tool responses
+                     content=tool_message,
+                     is_llm_message=True,
+                     metadata=metadata
+                 )
+                 return message_obj  # Return the full message object
+
+             # For XML and other non-native tools, use the new structured format
+             # Determine message role based on strategy
+             result_role = "user" if strategy == "user_message" else "assistant"
+
+             # Create two versions of the structured result
+             # 1. Rich version for the frontend
+             structured_result_for_frontend = self._create_structured_tool_result(tool_call, result, parsing_details,
+                                                                                  for_llm=False)
+             # 2. Concise version for the LLM
+             structured_result_for_llm = self._create_structured_tool_result(tool_call, result, parsing_details,
+                                                                             for_llm=True)
+
+             # Add the message with the appropriate role to the conversation history
+             # This allows the LLM to see the tool result in subsequent interactions
+             result_message_for_llm = {
+                 "role": result_role,
+                 "content": json.dumps(structured_result_for_llm)
+             }
+
+             # Add rich content to metadata for frontend use
+             if metadata is None:
+                 metadata = {}
+             metadata['frontend_content'] = structured_result_for_frontend
+
+             message_obj = self._add_message_with_agent_info(
+                 type="tool",
+                 content=result_message_for_llm,  # Save the LLM-friendly version
+                 is_llm_message=True,
+                 metadata=metadata
+             )
+
+             # If the message was saved, modify it in-memory for the frontend before returning
+             if message_obj:
+                 # The frontend expects the rich content in the 'content' field.
+                 # The DB has the rich content in metadata.frontend_content.
+                 # Let's reconstruct the message for yielding.
+                 message_for_yield = message_obj.copy()
+                 message_for_yield['content'] = structured_result_for_frontend
+                 return message_for_yield
+
+             return message_obj  # Return the modified message object
+         except Exception as e:
+             logging.error(f"Error adding tool result: {str(e)}", exc_info=True)
+             self.trace.event(name="error_adding_tool_result", level="ERROR",
+                              status_message=(f"Error adding tool result: {str(e)}"),
+                              metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
+                                        "assistant_message_id": assistant_message_id,
+                                        "parsing_details": parsing_details})
+             # Fallback to a simple message
+             try:
+                 fallback_message = {
+                     "role": "user",
+                     "content": str(result)
+                 }
+                 message_obj = self.add_message(
+                     type="tool",
+                     content=fallback_message,
+                     is_llm_message=True,
+                     metadata={"assistant_message_id": assistant_message_id} if assistant_message_id else {}
+                 )
+                 return message_obj  # Return the full message object
+             except Exception as e2:
+                 logging.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
+                 self.trace.event(name="failed_even_with_fallback_message", level="ERROR",
+                                  status_message=(f"Failed even with fallback message: {str(e2)}"),
+                                  metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
+                                            "assistant_message_id": assistant_message_id,
+                                            "parsing_details": parsing_details})
+                 return None  # Return None on error
+
+     def _create_structured_tool_result(self, tool_call: Dict[str, Any], result: XGAToolResult,
+                                        parsing_details: Optional[Dict[str, Any]] = None, for_llm: bool = False):
+         function_name = tool_call.get("function_name", "unknown")
+         xml_tag_name = tool_call.get("xml_tag_name")
+         arguments = tool_call.get("arguments", {})
+         tool_call_id = tool_call.get("id")
+
+         # Process the output - if it's a JSON string, parse it back to an object
+         output = result.output if hasattr(result, 'output') else str(result)
+         if isinstance(output, str):
+             try:
+                 # Try to parse as JSON to provide structured data to frontend
+                 parsed_output = safe_json_parse(output)
+                 # If parsing succeeded and we got a dict/list, use the parsed version
+                 if isinstance(parsed_output, (dict, list)):
+                     output = parsed_output
+                 # Otherwise keep the original string
+             except Exception:
+                 # If parsing fails, keep the original string
+                 pass
+
+         output_to_use = output
+         # If this is for the LLM and it's an edit_file tool, create a concise output
+         if for_llm and function_name == 'edit_file' and isinstance(output, dict):
+             # The frontend needs original_content and updated_content to render diffs.
+             # The concise version for the LLM was causing issues.
+             # We will now pass the full output, and rely on the ContextManager to truncate if needed.
+             output_to_use = output
+
+         # Create the structured result
+         structured_result_v1 = {
+             "tool_execution": {
+                 "function_name": function_name,
+                 "xml_tag_name": xml_tag_name,
+                 "tool_call_id": tool_call_id,
+                 "arguments": arguments,
+                 "result": {
+                     "success": result.success if hasattr(result, 'success') else True,
+                     "output": output_to_use,  # This will be either rich or concise based on `for_llm`
+                     "error": getattr(result, 'error', None) if hasattr(result, 'error') else None
+                 },
+             }
+         }
+
+         return structured_result_v1
+
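# --- Editorial sketch, not part of the package source: the structure produced
# --- above for an XML-style call. XGAToolResult is constructed with the same
# --- (success, output) keywords used throughout this module; values are
# --- illustrative.
structured = processor._create_structured_tool_result(
    {"function_name": "read_file", "xml_tag_name": "read-file",
     "arguments": {"path": "README.md"}},
    XGAToolResult(success=True, output='{"content": "..."}'),
)
# structured["tool_execution"]["result"] ~= {"success": True,
#                                            "output": {"content": "..."},
#                                            "error": None}
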
+     def _create_tool_context(self, tool_call: Dict[str, Any], tool_index: int,
+                              assistant_message_id: Optional[str] = None,
+                              parsing_details: Optional[Dict[str, Any]] = None) -> ToolExecutionContext:
+         """Create a tool execution context with display name and parsing details populated."""
+         context = ToolExecutionContext(
+             tool_call=tool_call,
+             tool_index=tool_index,
+             assistant_message_id=assistant_message_id,
+             parsing_details=parsing_details
+         )
+
+         # Set function_name and xml_tag_name fields
+         if "xml_tag_name" in tool_call:
+             context.xml_tag_name = tool_call["xml_tag_name"]
+             context.function_name = tool_call.get("function_name", tool_call["xml_tag_name"])
+         else:
+             # For non-XML tools, use function name directly
+             context.function_name = tool_call.get("function_name", "unknown")
+             context.xml_tag_name = None
+
+         return context
+
+     def _yield_and_save_tool_started(self, context: ToolExecutionContext, thread_id: str, thread_run_id: str) -> \
+             Optional[Dict[str, Any]]:
+         """Formats, saves, and returns a tool started status message."""
+         tool_name = context.xml_tag_name or context.function_name
+         content = {
+             "role": "assistant", "status_type": "tool_started",
+             "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
+             "message": f"Starting execution of {tool_name}", "tool_index": context.tool_index,
+             "tool_call_id": context.tool_call.get("id")  # Include tool_call ID if native
+         }
+         metadata = {"thread_run_id": thread_run_id}
+         saved_message_obj = self.add_message(
+             type="status", content=content, is_llm_message=False, metadata=metadata
+         )
+         return saved_message_obj  # Return the full object (or None if saving failed)
+
+     def _yield_and_save_tool_completed(self, context: ToolExecutionContext, tool_message_id: Optional[str],
+                                        thread_id: str, thread_run_id: str) -> Optional[Dict[str, Any]]:
+         """Formats, saves, and returns a tool completed/failed status message."""
+         if not context.result:
+             # Delegate to error saving if result is missing (e.g., execution failed)
+             return self._yield_and_save_tool_error(context, thread_id, thread_run_id)
+
+         tool_name = context.xml_tag_name or context.function_name
+         status_type = "tool_completed" if context.result.success else "tool_failed"
+         message_text = f"Tool {tool_name} {'completed successfully' if context.result.success else 'failed'}"
+
+         content = {
+             "role": "assistant", "status_type": status_type,
+             "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
+             "message": message_text, "tool_index": context.tool_index,
+             "tool_call_id": context.tool_call.get("id")
+         }
+         metadata = {"thread_run_id": thread_run_id}
+         # Add the *actual* tool result message ID to the metadata if available and successful
+         if context.result.success and tool_message_id:
+             metadata["linked_tool_result_message_id"] = tool_message_id
+
+         # <<< ADDED: Signal if this is a terminating tool >>>
+         if context.function_name in ['ask', 'complete']:
+             metadata["agent_should_terminate"] = "true"
+             logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
+             self.trace.event(name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
+                 f"Marking tool status for '{context.function_name}' with termination signal."))
+         # <<< END ADDED >>>
+
+         saved_message_obj = self.add_message(
+             type="status", content=content, is_llm_message=False, metadata=metadata
+         )
+         return saved_message_obj
+
+     def _yield_and_save_tool_error(self, context: ToolExecutionContext, thread_id: str, thread_run_id: str) -> \
+             Optional[Dict[str, Any]]:
+         """Formats, saves, and returns a tool error status message."""
+         error_msg = str(context.error) if context.error else "Unknown error during tool execution"
+         tool_name = context.xml_tag_name or context.function_name
+         content = {
+             "role": "assistant", "status_type": "tool_error",
+             "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
+             "message": f"Error executing tool {tool_name}: {error_msg}",
+             "tool_index": context.tool_index,
+             "tool_call_id": context.tool_call.get("id")
+         }
+         metadata = {"thread_run_id": thread_run_id}
+         # Save the status message with is_llm_message=False
+         saved_message_obj = self.add_message(
+             type="status", content=content, is_llm_message=False, metadata=metadata
+         )
+ return saved_message_obj