xgae 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic.

@@ -0,0 +1,710 @@
1
+ import asyncio
2
+ import json
3
+ import logging
4
+ from dataclasses import dataclass
5
+ from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable, TypedDict, AsyncGenerator
6
+ from abc import ABC, abstractmethod
7
+
8
+ from xgae.engine.xga_base import XGAToolResult, XGAResponseMsg, XGAToolBox
9
+ # from xgae.utils.setup_env import langfuse
10
+ from xgae.utils.json_helpers import (
11
+ safe_json_parse,
12
+ format_for_yield
13
+ )
14
+ from xgae.utils.xml_tool_parser import XMLToolParser
15
+
16
+ # Type alias for XML result adding strategy
17
+ XmlAddingStrategy = Literal["user_message", "assistant_message", "inline_edit"]
18
+
19
+ # Type alias for tool execution strategy
20
+ ToolExecutionStrategy = Literal["sequential", "parallel"]
21
+
22
+ class TaskResponseContext(TypedDict, total=False):
23
+ is_stream: bool
24
+ task_id: str
25
+ task_run_id: str
26
+ trace_id: str
27
+ model_name: str
28
+ max_xml_tool_calls: int
29
+ add_response_msg_func: Callable
30
+ tool_box: XGAToolBox
31
+ tool_execution_strategy: Literal["sequential", "parallel"]
32
+ xml_adding_strategy: Literal["user_message", "assistant_message", "inline_edit"]
33
+
34
+
35
+ class TaskRunContinuousState(TypedDict, total=False):
36
+ accumulated_content: str
37
+ auto_continue_count: int
38
+ auto_continue: bool
39
+
40
+ class Span:  # no-op placeholder for a tracing span
41
+ def end(self, **kwargs):
42
+ pass
43
+
44
+ class Trace:  # no-op placeholder for a tracing client
45
+ def event(self, **kwargs):
46
+ pass
47
+
48
+ def span(self, **kwargs):
49
+ return Span()
50
+
51
+
52
+ @dataclass
53
+ class ToolExecutionContext:
54
+ """Context for a tool execution including call details, result, and display info."""
55
+ tool_call: Dict[str, Any]
56
+ tool_index: int
57
+ result: Optional[XGAToolResult] = None
58
+ function_name: Optional[str] = None
59
+ xml_tag_name: Optional[str] = None
60
+ error: Optional[Exception] = None
61
+ assistant_message_id: Optional[str] = None
62
+ parsing_details: Optional[Dict[str, Any]] = None
63
+
64
+
65
+ class TaskResponseProcessor(ABC):
66
+ def __init__(self, response_context: TaskResponseContext):
67
+ self.response_context = response_context
68
+
69
+ self.task_id = response_context.get("task_id", "")
70
+ self.task_run_id = response_context.get("task_run_id", "")
71
+
72
+ # Initialize a no-op trace used for span/event logging
73
+ self.trace = Trace()
74
+
75
+ self.add_message = response_context.get("add_response_msg_func")  # key declared in TaskResponseContext
76
+ self._add_message_with_agent_info = self.add_message
77
+ self.tool_box = response_context.get("tool_box")
78
+
79
+ self.xml_parser = XMLToolParser()
80
+
81
+ @abstractmethod
82
+ async def process_response(self,
83
+ llm_response: AsyncGenerator,
84
+ prompt_messages: List[Dict[str, Any]],
85
+ continuous_state: Optional[TaskRunContinuousState] = None
86
+ ) -> AsyncGenerator[Dict[str, Any], None]:
87
+ pass
88
+
89
+ async def _yield_message(self, message_obj: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
90
+ """Helper to yield a message with proper formatting.
91
+
92
+ Ensures that content and metadata are JSON strings for client compatibility.
93
+ """
94
+ if message_obj:
95
+ return format_for_yield(message_obj)
96
+ return None
97
+
98
+
99
+
100
+ def _extract_xml_chunks(self, content: str) -> List[str]:
101
+ """Extract complete XML chunks using start and end pattern matching."""
102
+ chunks = []
103
+ pos = 0
104
+
105
+ try:
106
+ # First, look for new format <function_calls> blocks
107
+ start_pattern = '<function_calls>'
108
+ end_pattern = '</function_calls>'
109
+
110
+ while pos < len(content):
111
+ # Find the next function_calls block
112
+ start_pos = content.find(start_pattern, pos)
113
+ if start_pos == -1:
114
+ break
115
+
116
+ # Find the matching end tag
117
+ end_pos = content.find(end_pattern, start_pos)
118
+ if end_pos == -1:
119
+ break
120
+
121
+ # Extract the complete block including tags
122
+ chunk_end = end_pos + len(end_pattern)
123
+ chunk = content[start_pos:chunk_end]
124
+ chunks.append(chunk)
125
+
126
+ # Move position past this chunk
127
+ pos = chunk_end
128
+
129
+ # If no new format found, fall back to old format for backwards compatibility
130
+ if not chunks:
131
+ pos = 0
132
+ while pos < len(content):
133
+ # Find the next tool tag
134
+ next_tag_start = -1
135
+ current_tag = None
136
+
137
+ # Find the earliest occurrence of any registered tool function name
138
+ # Check for available function names
139
+ #available_functions = self.tool_registry.get_available_functions()
140
+ available_functions = self.tool_box.get_task_tool_names(self.task_id)
141
+ for func_name in available_functions:
142
+ # Convert function name to potential tag name (underscore to dash)
143
+ tag_name = func_name.replace('_', '-')
144
+ start_pattern = f'<{tag_name}'
145
+ tag_pos = content.find(start_pattern, pos)
146
+
147
+ if tag_pos != -1 and (next_tag_start == -1 or tag_pos < next_tag_start):
148
+ next_tag_start = tag_pos
149
+ current_tag = tag_name
150
+
151
+ if next_tag_start == -1 or not current_tag:
152
+ break
153
+
154
+ # Find the matching end tag
155
+ end_pattern = f'</{current_tag}>'
156
+ tag_stack = []
157
+ chunk_start = next_tag_start
158
+ current_pos = next_tag_start
159
+
160
+ while current_pos < len(content):
161
+ # Look for next start or end tag of the same type
162
+ next_start = content.find(f'<{current_tag}', current_pos + 1)
163
+ next_end = content.find(end_pattern, current_pos)
164
+
165
+ if next_end == -1: # No closing tag found
166
+ break
167
+
168
+ if next_start != -1 and next_start < next_end:
169
+ # Found nested start tag
170
+ tag_stack.append(next_start)
171
+ current_pos = next_start + 1
172
+ else:
173
+ # Found end tag
174
+ if not tag_stack: # This is our matching end tag
175
+ chunk_end = next_end + len(end_pattern)
176
+ chunk = content[chunk_start:chunk_end]
177
+ chunks.append(chunk)
178
+ pos = chunk_end
179
+ break
180
+ else:
181
+ # Pop nested tag
182
+ tag_stack.pop()
183
+ current_pos = next_end + 1
184
+
185
+ if current_pos >= len(content): # Reached end without finding closing tag
186
+ break
187
+
188
+ pos = max(pos + 1, current_pos)
189
+
190
+ except Exception as e:
191
+ logging.error(f"Error extracting XML chunks: {e}")
192
+ logging.error(f"Content was: {content}")
193
+ self.trace.event(name="error_extracting_xml_chunks", level="ERROR",
194
+ status_message=(f"Error extracting XML chunks: {e}"), metadata={"content": content})
195
+
196
+ return chunks
197
+
198
+ def _parse_xml_tool_call(self, xml_chunk: str) -> Optional[Tuple[Dict[str, Any], Dict[str, Any]]]:
199
+ """Parse XML chunk into tool call format and return parsing details.
200
+
201
+ Returns:
202
+ Tuple of (tool_call, parsing_details) or None if parsing fails.
203
+ - tool_call: Dict with 'function_name', 'xml_tag_name', 'arguments'
204
+ - parsing_details: Dict with 'attributes', 'elements', 'text_content', 'root_content'
205
+ """
206
+ try:
207
+ # Check if this is the new format (contains <function_calls>)
208
+ if '<function_calls>' in xml_chunk and '<invoke' in xml_chunk:
209
+ # Use the new XML parser
210
+ parsed_calls = self.xml_parser.parse_content(xml_chunk)
211
+
212
+ if not parsed_calls:
213
+ logging.error(f"No tool calls found in XML chunk: {xml_chunk}")
214
+ return None
215
+
216
+ # Take the first tool call (should only be one per chunk)
217
+ xml_tool_call = parsed_calls[0]
218
+
219
+ # Convert to the expected format
220
+ tool_call = {
221
+ "function_name": xml_tool_call.function_name,
222
+ "xml_tag_name": xml_tool_call.function_name.replace('_', '-'), # For backwards compatibility
223
+ "arguments": xml_tool_call.parameters
224
+ }
225
+
226
+ # Include the parsing details
227
+ parsing_details = xml_tool_call.parsing_details
228
+ parsing_details["raw_xml"] = xml_tool_call.raw_xml
229
+
230
+ logging.debug(f"Parsed new format tool call: {tool_call}")
231
+ return tool_call, parsing_details
232
+
233
+ # If not the expected <function_calls><invoke> format, return None
234
+ logging.error(f"XML chunk does not contain expected <function_calls><invoke> format: {xml_chunk}")
235
+ return None
236
+
237
+ except Exception as e:
238
+ logging.error(f"Error parsing XML chunk: {e}")
239
+ logging.error(f"XML chunk was: {xml_chunk}")
240
+ self.trace.event(name="error_parsing_xml_chunk", level="ERROR",
241
+ status_message=(f"Error parsing XML chunk: {e}"), metadata={"xml_chunk": xml_chunk})
242
+ return None
243
+
244
+ def _parse_xml_tool_calls(self, content: str) -> List[Dict[str, Any]]:
245
+ """Parse XML tool calls from content string.
246
+
247
+ Returns:
248
+ List of dictionaries, each containing {'tool_call': ..., 'parsing_details': ...}
249
+ """
250
+ parsed_data = []
251
+
252
+ try:
253
+ xml_chunks = self._extract_xml_chunks(content)
254
+
255
+ for xml_chunk in xml_chunks:
256
+ result = self._parse_xml_tool_call(xml_chunk)
257
+ if result:
258
+ tool_call, parsing_details = result
259
+ parsed_data.append({
260
+ "tool_call": tool_call,
261
+ "parsing_details": parsing_details
262
+ })
263
+
264
+ except Exception as e:
265
+ logging.error(f"Error parsing XML tool calls: {e}", exc_info=True)
266
+ self.trace.event(name="error_parsing_xml_tool_calls", level="ERROR",
267
+ status_message=(f"Error parsing XML tool calls: {e}"), metadata={"content": content})
268
+
269
+ return parsed_data
270
+
271
+ # Tool execution methods
272
+ async def _execute_tool(self, tool_call: Dict[str, Any]) -> XGAToolResult:
273
+ """Execute a single tool call and return the result."""
274
+ span = self.trace.span(name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
275
+ try:
276
+ function_name = tool_call["function_name"]
277
+ arguments = tool_call["arguments"]
278
+
279
+ logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
280
+ self.trace.event(name="executing_tool", level="DEFAULT",
281
+ status_message=(f"Executing tool: {function_name} with arguments: {arguments}"))
282
+
283
+ if isinstance(arguments, str):
284
+ try:
285
+ arguments = safe_json_parse(arguments)
286
+ except json.JSONDecodeError:
287
+ arguments = {"text": arguments} # @todo modify
288
+
289
+ # Get available functions from tool registry
290
+ #available_functions = self.tool_registry.get_available_functions()
291
+ result = None
292
+ available_function_names = self.tool_box.get_task_tool_names(self.task_id)
293
+ if function_name in available_function_names:
294
+ result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
295
+ else:
296
+ logging.error(f"Tool function '{function_name}' not found in registry")
297
+ result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
298
+ logging.info(f"Tool execution complete: {function_name} -> {result}")
299
+ span.end(status_message="tool_executed", output=result)
300
+ return result
301
+ except Exception as e:
302
+ logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
303
+ span.end(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
304
+ return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
305
+
306
+ async def _execute_tools(
307
+ self,
308
+ tool_calls: List[Dict[str, Any]],
309
+ execution_strategy: ToolExecutionStrategy = "sequential"
310
+ ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
311
+ logging.info(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}")
312
+ self.trace.event(name="executing_tools_with_strategy", level="DEFAULT",
313
+ status_message=(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}"))
314
+
315
+ if execution_strategy == "sequential":
316
+ return await self._execute_tools_sequentially(tool_calls)
317
+ elif execution_strategy == "parallel":
318
+ return await self._execute_tools_in_parallel(tool_calls)
319
+ else:
320
+ logging.warning(f"Unknown execution strategy: {execution_strategy}, falling back to sequential")
321
+ return await self._execute_tools_sequentially(tool_calls)
322
+
323
+ async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[
324
+ Tuple[Dict[str, Any], XGAToolResult]]:
325
+ """Execute tool calls sequentially and return results.
326
+
327
+ This method executes tool calls one after another, waiting for each tool to complete
328
+ before starting the next one. This is useful when tools have dependencies on each other.
329
+
330
+ Args:
331
+ tool_calls: List of tool calls to execute
332
+
333
+ Returns:
334
+ List of tuples containing the original tool call and its result
335
+ """
336
+ if not tool_calls:
337
+ return []
338
+
339
+ try:
340
+ tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
341
+ logging.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
342
+ self.trace.event(name="executing_tools_sequentially", level="DEFAULT",
343
+ status_message=(f"Executing {len(tool_calls)} tools sequentially: {tool_names}"))
344
+
345
+ results = []
346
+ for index, tool_call in enumerate(tool_calls):
347
+ tool_name = tool_call.get('function_name', 'unknown')
348
+ logging.debug(f"Executing tool {index + 1}/{len(tool_calls)}: {tool_name}")
349
+
350
+ try:
351
+ result = await self._execute_tool(tool_call)
352
+ results.append((tool_call, result))
353
+ logging.debug(f"Completed tool {tool_name} with success={result.success}")
354
+
355
+ # Check if this is a terminating tool (ask or complete)
356
+ if tool_name in ['ask', 'complete']:
357
+ logging.info(f"Terminating tool '{tool_name}' executed. Stopping further tool execution.")
358
+ self.trace.event(name="terminating_tool_executed", level="DEFAULT", status_message=(
359
+ f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
360
+ break # Stop executing remaining tools
361
+
362
+ except Exception as e:
363
+ logging.error(f"Error executing tool {tool_name}: {str(e)}")
364
+ self.trace.event(name="error_executing_tool", level="ERROR",
365
+ status_message=(f"Error executing tool {tool_name}: {str(e)}"))
366
+ error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
367
+ results.append((tool_call, error_result))
368
+
369
+ logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
370
+ self.trace.event(name="sequential_execution_completed", level="DEFAULT", status_message=(
371
+ f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
372
+ return results
373
+
374
+ except Exception as e:
375
+ logging.error(f"Error in sequential tool execution: {str(e)}", exc_info=True)
376
+ # Return partial results plus error results for remaining tools
377
+ completed_results = results if 'results' in locals() else []
378
+ completed_tool_names = [r[0].get('function_name', 'unknown') for r in completed_results]
379
+ remaining_tools = [t for t in tool_calls if t.get('function_name', 'unknown') not in completed_tool_names]
380
+
381
+ # Add error results for remaining tools
382
+ error_results = [(tool, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
383
+ for tool in remaining_tools]
384
+
385
+ return completed_results + error_results
386
+
387
+ async def _execute_tools_in_parallel(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
388
+ if not tool_calls:
389
+ return []
390
+
391
+ try:
392
+ tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
393
+ logging.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
394
+ self.trace.event(name="executing_tools_in_parallel", level="DEFAULT",
395
+ status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
396
+
397
+ # Create tasks for all tool calls
398
+ tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
399
+
400
+ # Execute all tasks concurrently with error handling
401
+ results = await asyncio.gather(*tasks, return_exceptions=True)
402
+
403
+ # Process results and handle any exceptions
404
+ processed_results = []
405
+ for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
406
+ if isinstance(result, Exception):
407
+ logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
408
+ self.trace.event(name="error_executing_tool", level="ERROR", status_message=(
409
+ f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
410
+ # Create error result
411
+ error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
412
+ processed_results.append((tool_call, error_result))
413
+ else:
414
+ processed_results.append((tool_call, result))
415
+
416
+ logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
417
+ self.trace.event(name="parallel_execution_completed", level="DEFAULT",
418
+ status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
419
+ return processed_results
420
+
421
+ except Exception as e:
422
+ logging.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
423
+ self.trace.event(name="error_in_parallel_tool_execution", level="ERROR",
424
+ status_message=(f"Error in parallel tool execution: {str(e)}"))
425
+ # Return error results for all tools if the gather itself fails
426
+ return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
427
+ for tool_call in tool_calls]
428
+
429
+ def _add_tool_result(
430
+ self,
431
+ thread_id: str,
432
+ tool_call: Dict[str, Any],
433
+ result: XGAToolResult,
434
+ strategy: Union[XmlAddingStrategy, str] = "assistant_message",
435
+ assistant_message_id: Optional[str] = None,
436
+ parsing_details: Optional[Dict[str, Any]] = None
437
+ ) -> Optional[Dict[str, Any]]: # Return the full message object
438
+ try:
439
+ message_obj = None # Initialize message_obj
440
+
441
+ # Create metadata with assistant_message_id if provided
442
+ metadata = {}
443
+ if assistant_message_id:
444
+ metadata["assistant_message_id"] = assistant_message_id
445
+ logging.info(f"Linking tool result to assistant message: {assistant_message_id}")
446
+ self.trace.event(name="linking_tool_result_to_assistant_message", level="DEFAULT",
447
+ status_message=(f"Linking tool result to assistant message: {assistant_message_id}"))
448
+
449
+ # --- Add parsing details to metadata if available ---
450
+ if parsing_details:
451
+ metadata["parsing_details"] = parsing_details
452
+ logging.info("Adding parsing_details to tool result metadata")
453
+ self.trace.event(name="adding_parsing_details_to_tool_result_metadata", level="DEFAULT",
454
+ status_message=(f"Adding parsing_details to tool result metadata"),
455
+ metadata={"parsing_details": parsing_details})
456
+ # ---
457
+
458
+ # Check if this is a native function call (has id field)
459
+ if "id" in tool_call:
460
+ # Format as a proper tool message according to OpenAI spec
461
+ function_name = tool_call.get("function_name", "")
462
+
463
+ # Format the tool result content - tool role needs string content
464
+ if isinstance(result, str):
465
+ content = result
466
+ elif hasattr(result, 'output'):
467
+ # If it's an XGAToolResult object
468
+ if isinstance(result.output, dict) or isinstance(result.output, list):
469
+ # If output is already a dict or list, convert to JSON string
470
+ content = json.dumps(result.output)
471
+ else:
472
+ # Otherwise just use the string representation
473
+ content = str(result.output)
474
+ else:
475
+ # Fallback to string representation of the whole result
476
+ content = str(result)
477
+
478
+ logging.info(f"Formatted tool result content: {content[:100]}...")
479
+ self.trace.event(name="formatted_tool_result_content", level="DEFAULT",
480
+ status_message=(f"Formatted tool result content: {content[:100]}..."))
481
+
482
+ # Create the tool response message with proper format
483
+ tool_message = {
484
+ "role": "tool",
485
+ "tool_call_id": tool_call["id"],
486
+ "name": function_name,
487
+ "content": content
488
+ }
489
+
490
+ logging.info(f"Adding native tool result for tool_call_id={tool_call['id']} with role=tool")
491
+ self.trace.event(name="adding_native_tool_result_for_tool_call_id", level="DEFAULT", status_message=(
492
+ f"Adding native tool result for tool_call_id={tool_call['id']} with role=tool"))
493
+
494
+ # Add as a tool message to the conversation history
495
+ # This makes the result visible to the LLM in the next turn
496
+ message_obj = self.add_message(
497
+ type="tool", # Special type for tool responses
498
+ content=tool_message,
499
+ is_llm_message=True,
500
+ metadata=metadata
501
+ )
502
+ return message_obj # Return the full message object
503
+
504
+ # For XML and other non-native tools, use the new structured format
505
+ # Determine message role based on strategy
506
+ result_role = "user" if strategy == "user_message" else "assistant"
507
+
508
+ # Create two versions of the structured result
509
+ # 1. Rich version for the frontend
510
+ structured_result_for_frontend = self._create_structured_tool_result(tool_call, result, parsing_details,
511
+ for_llm=False)
512
+ # 2. Concise version for the LLM
513
+ structured_result_for_llm = self._create_structured_tool_result(tool_call, result, parsing_details,
514
+ for_llm=True)
515
+
516
+ # Add the message with the appropriate role to the conversation history
517
+ # This allows the LLM to see the tool result in subsequent interactions
518
+ result_message_for_llm = {
519
+ "role": result_role,
520
+ "content": json.dumps(structured_result_for_llm)
521
+ }
522
+
523
+ # Add rich content to metadata for frontend use
524
+ if metadata is None:
525
+ metadata = {}
526
+ metadata['frontend_content'] = structured_result_for_frontend
527
+
528
+ message_obj = self._add_message_with_agent_info(
529
+ type="tool",
530
+ content=result_message_for_llm, # Save the LLM-friendly version
531
+ is_llm_message=True,
532
+ metadata=metadata
533
+ )
534
+
535
+ # If the message was saved, modify it in-memory for the frontend before returning
536
+ if message_obj:
537
+ # The frontend expects the rich content in the 'content' field.
538
+ # The DB has the rich content in metadata.frontend_content.
539
+ # Let's reconstruct the message for yielding.
540
+ message_for_yield = message_obj.copy()
541
+ message_for_yield['content'] = structured_result_for_frontend
542
+ return message_for_yield
543
+
544
+ return message_obj # Return the modified message object
545
+ except Exception as e:
546
+ logging.error(f"Error adding tool result: {str(e)}", exc_info=True)
547
+ self.trace.event(name="error_adding_tool_result", level="ERROR",
548
+ status_message=(f"Error adding tool result: {str(e)}"),
549
+ metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
550
+ "assistant_message_id": assistant_message_id,
551
+ "parsing_details": parsing_details})
552
+ # Fallback to a simple message
553
+ try:
554
+ fallback_message = {
555
+ "role": "user",
556
+ "content": str(result)
557
+ }
558
+ message_obj = self.add_message(
559
+ type="tool",
560
+ content=fallback_message,
561
+ is_llm_message=True,
562
+ metadata={"assistant_message_id": assistant_message_id} if assistant_message_id else {}
563
+ )
564
+ return message_obj # Return the full message object
565
+ except Exception as e2:
566
+ logging.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
567
+ self.trace.event(name="failed_even_with_fallback_message", level="ERROR",
568
+ status_message=(f"Failed even with fallback message: {str(e2)}"),
569
+ metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
570
+ "assistant_message_id": assistant_message_id,
571
+ "parsing_details": parsing_details})
572
+ return None # Return None on error
573
+
574
+ def _create_structured_tool_result(self, tool_call: Dict[str, Any], result: XGAToolResult,
575
+ parsing_details: Optional[Dict[str, Any]] = None, for_llm: bool = False):
576
+ function_name = tool_call.get("function_name", "unknown")
577
+ xml_tag_name = tool_call.get("xml_tag_name")
578
+ arguments = tool_call.get("arguments", {})
579
+ tool_call_id = tool_call.get("id")
580
+
581
+ # Process the output - if it's a JSON string, parse it back to an object
582
+ output = result.output if hasattr(result, 'output') else str(result)
583
+ if isinstance(output, str):
584
+ try:
585
+ # Try to parse as JSON to provide structured data to frontend
586
+ parsed_output = safe_json_parse(output)
587
+ # If parsing succeeded and we got a dict/list, use the parsed version
588
+ if isinstance(parsed_output, (dict, list)):
589
+ output = parsed_output
590
+ # Otherwise keep the original string
591
+ except Exception:
592
+ # If parsing fails, keep the original string
593
+ pass
594
+
595
+ output_to_use = output
596
+ # If this is for the LLM and it's an edit_file tool, create a concise output
597
+ if for_llm and function_name == 'edit_file' and isinstance(output, dict):
598
+ # The frontend needs original_content and updated_content to render diffs.
599
+ # The concise version for the LLM was causing issues.
600
+ # We will now pass the full output, and rely on the ContextManager to truncate if needed.
601
+ output_to_use = output
602
+
603
+ # Create the structured result
604
+ structured_result_v1 = {
605
+ "tool_execution": {
606
+ "function_name": function_name,
607
+ "xml_tag_name": xml_tag_name,
608
+ "tool_call_id": tool_call_id,
609
+ "arguments": arguments,
610
+ "result": {
611
+ "success": result.success if hasattr(result, 'success') else True,
612
+ "output": output_to_use, # This will be either rich or concise based on `for_llm`
613
+ "error": getattr(result, 'error', None) if hasattr(result, 'error') else None
614
+ },
615
+ }
616
+ }
617
+
618
+ return structured_result_v1
619
+
620
+ def _create_tool_context(self, tool_call: Dict[str, Any], tool_index: int,
621
+ assistant_message_id: Optional[str] = None,
622
+ parsing_details: Optional[Dict[str, Any]] = None) -> ToolExecutionContext:
623
+ """Create a tool execution context with display name and parsing details populated."""
624
+ context = ToolExecutionContext(
625
+ tool_call=tool_call,
626
+ tool_index=tool_index,
627
+ assistant_message_id=assistant_message_id,
628
+ parsing_details=parsing_details
629
+ )
630
+
631
+ # Set function_name and xml_tag_name fields
632
+ if "xml_tag_name" in tool_call:
633
+ context.xml_tag_name = tool_call["xml_tag_name"]
634
+ context.function_name = tool_call.get("function_name", tool_call["xml_tag_name"])
635
+ else:
636
+ # For non-XML tools, use function name directly
637
+ context.function_name = tool_call.get("function_name", "unknown")
638
+ context.xml_tag_name = None
639
+
640
+ return context
641
+
642
+ def _yield_and_save_tool_started(self, context: ToolExecutionContext, thread_id: str, thread_run_id: str) -> \
643
+ Optional[Dict[str, Any]]:
644
+ """Formats, saves, and returns a tool started status message."""
645
+ tool_name = context.xml_tag_name or context.function_name
646
+ content = {
647
+ "role": "assistant", "status_type": "tool_started",
648
+ "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
649
+ "message": f"Starting execution of {tool_name}", "tool_index": context.tool_index,
650
+ "tool_call_id": context.tool_call.get("id") # Include tool_call ID if native
651
+ }
652
+ metadata = {"thread_run_id": thread_run_id}
653
+ saved_message_obj = self.add_message(
654
+ type="status", content=content, is_llm_message=False, metadata=metadata
655
+ )
656
+ return saved_message_obj # Return the full object (or None if saving failed)
657
+
658
+ def _yield_and_save_tool_completed(self, context: ToolExecutionContext, tool_message_id: Optional[str],
659
+ thread_id: str, thread_run_id: str) -> Optional[Dict[str, Any]]:
660
+ """Formats, saves, and returns a tool completed/failed status message."""
661
+ if not context.result:
662
+ # Delegate to error saving if result is missing (e.g., execution failed)
663
+ return self._yield_and_save_tool_error(context, thread_id, thread_run_id)
664
+
665
+ tool_name = context.xml_tag_name or context.function_name
666
+ status_type = "tool_completed" if context.result.success else "tool_failed"
667
+ message_text = f"Tool {tool_name} {'completed successfully' if context.result.success else 'failed'}"
668
+
669
+ content = {
670
+ "role": "assistant", "status_type": status_type,
671
+ "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
672
+ "message": message_text, "tool_index": context.tool_index,
673
+ "tool_call_id": context.tool_call.get("id")
674
+ }
675
+ metadata = {"thread_run_id": thread_run_id}
676
+ # Add the *actual* tool result message ID to the metadata if available and successful
677
+ if context.result.success and tool_message_id:
678
+ metadata["linked_tool_result_message_id"] = tool_message_id
679
+
680
+ # <<< ADDED: Signal if this is a terminating tool >>>
681
+ if context.function_name in ['ask', 'complete']:
682
+ metadata["agent_should_terminate"] = "true"
683
+ logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
684
+ self.trace.event(name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
685
+ f"Marking tool status for '{context.function_name}' with termination signal."))
686
+ # <<< END ADDED >>>
687
+
688
+ saved_message_obj = self.add_message(
689
+ type="status", content=content, is_llm_message=False, metadata=metadata
690
+ )
691
+ return saved_message_obj
692
+
693
+ def _yield_and_save_tool_error(self, context: ToolExecutionContext, thread_id: str, thread_run_id: str) -> \
694
+ Optional[Dict[str, Any]]:
695
+ """Formats, saves, and returns a tool error status message."""
696
+ error_msg = str(context.error) if context.error else "Unknown error during tool execution"
697
+ tool_name = context.xml_tag_name or context.function_name
698
+ content = {
699
+ "role": "assistant", "status_type": "tool_error",
700
+ "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
701
+ "message": f"Error executing tool {tool_name}: {error_msg}",
702
+ "tool_index": context.tool_index,
703
+ "tool_call_id": context.tool_call.get("id")
704
+ }
705
+ metadata = {"thread_run_id": thread_run_id}
706
+ # Save the status message with is_llm_message=False
707
+ saved_message_obj = self.add_message(
708
+ type="status", content=content, is_llm_message=False, metadata=metadata
709
+ )
710
+ return saved_message_obj
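
For orientation only (editorial note, not part of the released file): the processor first scans the accumulated model output for complete <function_calls>...</function_calls> blocks and only falls back to legacy per-tool tags when none are found. A minimal standalone sketch of that first extraction pass, mirroring the string scanning in _extract_xml_chunks (all names below are illustrative), could look like:

# Illustrative sketch only; mirrors the first pass of _extract_xml_chunks.
from typing import List

def extract_function_call_blocks(content: str) -> List[str]:
    start_pattern, end_pattern = "<function_calls>", "</function_calls>"
    chunks, pos = [], 0
    while pos < len(content):
        start = content.find(start_pattern, pos)
        if start == -1:
            break
        end = content.find(end_pattern, start)
        if end == -1:  # block still incomplete, e.g. mid-stream
            break
        chunk_end = end + len(end_pattern)
        chunks.append(content[start:chunk_end])  # keep the enclosing tags
        pos = chunk_end
    return chunks

print(extract_function_call_blocks(
    "intro <function_calls><invoke>...</invoke></function_calls> tail"))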
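
Similarly, the ToolExecutionStrategy switch in _execute_tools reduces to awaiting tools one at a time or batching them with asyncio.gather(..., return_exceptions=True), so a failing tool surfaces as a returned value rather than cancelling the batch. A self-contained sketch of the two strategies with stand-in tools (hypothetical names, not the xgae API) is:

# Illustrative sketch only; demonstrates sequential vs. parallel execution.
import asyncio
from typing import Any, Dict, List, Literal, Tuple

async def demo_tool(name: str, delay: float) -> Dict[str, Any]:
    await asyncio.sleep(delay)
    return {"tool": name, "success": True}

async def run_tools(calls: List[Tuple[str, float]],
                    strategy: Literal["sequential", "parallel"]) -> List[Any]:
    if strategy == "parallel":
        # Exceptions are returned as results instead of propagating.
        return await asyncio.gather(*(demo_tool(n, d) for n, d in calls),
                                    return_exceptions=True)
    results = []
    for name, delay in calls:  # one at a time, in order
        results.append(await demo_tool(name, delay))
    return results

print(asyncio.run(run_tools([("ask", 0.01), ("complete", 0.01)], "parallel")))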