xgae 0.1.10__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic.

xgae/cli_app.py CHANGED
@@ -3,7 +3,7 @@ import sys
 
 from xgae.engine.mcp_tool_box import XGAMcpToolBox
 from xgae.engine.task_engine import XGATaskEngine
-from xgae.utils.llm_client import LLMConfig
+
 from xgae.utils.misc import read_file
 
 from xgae.utils.setup_env import setup_langfuse, setup_env_logging
@@ -50,9 +50,7 @@ async def cli() -> None:
     engine = XGATaskEngine(tool_box=tool_box,
                            general_tools=general_tools,
                            custom_tools=custom_tools,
-                           llm_config=LLMConfig(stream=False),
-                           system_prompt=system_prompt,
-                           max_auto_run=8)
+                           system_prompt=system_prompt)
 
     # Two task run in same langfuse trace
     trace_id = langfuse.trace(name="xgae_cli").trace_id
@@ -6,14 +6,14 @@ class XGAError(Exception):
     """Custom exception for errors in the XGA system."""
     pass
 
-XGAMsgStatusType = Literal["error", "finish", "tool_error", "tool_started", "tool_completed", "tool_failed", "thread_run_start", "thread_run_end", "assistant_response_start", "assistant_response_end"]
-XGAResponseMsgType = Literal["user", "status", "tool", "assistant", "assistant_complete"]
+XGAMsgStatusType = Literal["error", "finish", "tool_started", "tool_completed", "tool_error", "tool_failed"]
+XGAResponseMsgType = Literal["user", "status", "tool", "assistant", "assistant_chunk"]
 
 class XGAResponseMessage(TypedDict, total=False):
     message_id: str
     type: XGAResponseMsgType
     is_llm_message: bool
-    content: Union[Dict[str, Any], List[Any], str]
+    content: Union[Dict[str, Any], str]
     metadata: Dict[str, Any]
 
 class XGATaskResult(TypedDict, total=False):
@@ -113,22 +113,27 @@ class XGAMcpToolBox(XGAToolBox):
         async with self._mcp_client.session(server_name) as session:
             tools = await load_mcp_tools(session)
             mcp_tool = next((t for t in tools if t.name == tool_name), None)
-
+            is_general_tool = False
             if mcp_tool:
                 tool_args = args or {}
                 if server_name == self.GENERAL_MCP_SERVER_NAME:
                     tool_args = dict({"task_id": task_id}, **tool_args)
+                    is_general_tool = True
 
                 try:
                     tool_result = await mcp_tool.arun(tool_args)
-                    result = XGAToolResult(success=True, output=str(tool_result))
+                    if is_general_tool:
+                        tool_result = json.loads(tool_result)
+                        result = XGAToolResult(success=tool_result['success'], output=str(tool_result['output']))
+                    else:
+                        result = XGAToolResult(success=True, output=str(tool_result))
                 except Exception as e:
                     error = f"Call mcp tool '{tool_name}' error: {str(e)}"
-                    logging.error(f"XGAMcpToolBox.call_tool: {error}")
+                    logging.error(f"McpToolBox call_tool: {error}")
                     result = XGAToolResult(success=False, output=error)
             else:
                 error = f"No MCP tool found with name: {tool_name}"
-                logging.info(f"XGAMcpToolBox.call_tool: error={error}")
+                logging.info(f"McpToolBox call_tool: error={error}")
                 result = XGAToolResult(success=False, output=error)
 
         return result
@@ -179,11 +184,11 @@ class XGAMcpToolBox(XGAToolBox):
 
                 return server_config
             else:
-                logging.warning("MCP servers config file not found at: %s", mcp_config_path)
+                logging.warning(f"McpToolBox load_mcp_servers_config: MCP servers config file not found at: {mcp_config_path}")
                 return {"mcpServers": {}}
 
         except Exception as e:
-            logging.error("Failed to load MCP servers config: %s", str(e))
+            logging.error(f"McpToolBox load_mcp_servers_config: Failed to load MCP servers config: {e}")
            return {"mcpServers": {}}
 
 
@@ -1,6 +1,8 @@
 import logging
 
 from typing import List, Dict, Any, AsyncGenerator, override,Optional
+
+from xgae.utils import log_trace
 from xgae.utils.json_helpers import format_for_yield
 
 from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
@@ -11,22 +13,20 @@ class NonStreamTaskResponser(TaskResponseProcessor):
         super().__init__(response_context)
 
     @override
-    async def process_response(self,llm_response: Any,prompt_messages: List[Dict[str, Any]],
-                               continuous_state: Optional[TaskRunContinuousState] = None) -> AsyncGenerator[Dict[str, Any], None]:
+    async def process_response(self,
+                               llm_response: Any,prompt_messages: List[Dict[str, Any]],
+                               continuous_state: TaskRunContinuousState
+                               ) -> AsyncGenerator[Dict[str, Any], None]:
         llm_content = ""
         parsed_xml_data = []
         finish_reason = None
         llm_count = continuous_state.get("auto_continue_count")
 
         try:
-            # Extract finish_reason, content, tool calls
             if hasattr(llm_response, 'choices') and llm_response.choices:
                 if hasattr(llm_response.choices[0], 'finish_reason'):
                     finish_reason = llm_response.choices[0].finish_reason
-                    logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
-
-            self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
-                                 status_message=(f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}"))
+                    logging.info(f"NonStreamResp: LLM response finish_reason={finish_reason}")
 
             response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
             if response_message:
@@ -35,27 +35,23 @@ class NonStreamTaskResponser(TaskResponseProcessor):
 
             parsed_xml_data = self._parse_xml_tool_calls(llm_content)
             if self.max_xml_tool_calls > 0 and len(parsed_xml_data) > self.max_xml_tool_calls:
-                logging.warning(f"NonStreamTask:Truncate content, parsed_xml_data length {len(parsed_xml_data)} limit over max_xml_tool_calls={self.max_xml_tool_calls}")
-                xml_chunks = self._extract_xml_chunks(llm_content)[:self.max_xml_tool_calls]
-                if xml_chunks:
-                    last_chunk = xml_chunks[-1]
-                    last_chunk_pos = llm_content.find(last_chunk)
-                    if last_chunk_pos >= 0:
-                        llm_content = llm_content[:last_chunk_pos + len(last_chunk)]
+                logging.warning(f"NonStreamResp: Truncate content, parsed_xml_data length={len(parsed_xml_data)} limit over max_xml_tool_calls={self.max_xml_tool_calls}")
                 parsed_xml_data = parsed_xml_data[:self.max_xml_tool_calls]
                 finish_reason = "xml_tool_limit_reached"
-                logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
-            else:
-                logging.warning(f"NonStreamTask:LLM response_message is empty")
 
-            message_data = {"role": "assistant", "content": llm_content} # index=-1, full llm_content
-            assistant_msg = self.add_response_message(type="assistant_complete", content=message_data, is_llm_message=True)
+            self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
+                                 status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}, "
+                                                f"parsed_xml_data_len={len(parsed_xml_data)}, llm_content_len={len(llm_content)}")
+
+            if len(llm_content) == 0:
+                logging.warning(f"NonStreamResp: LLM response_message llm_content is empty")
+
+            message_data = {"role": "assistant", "content": llm_content}
+            assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
             yield assistant_msg
 
             tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
             if len(tool_calls_to_execute) > 0:
-                logging.info(f"NonStreamTask:Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}")
-
                 tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
 
                 tool_index = 0
@@ -81,31 +77,26 @@ class NonStreamTaskResponser(TaskResponseProcessor):
                     if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
                         finish_reason = "completed"
                         break
+
                     tool_index += 1
             else:
                 finish_reason = "non_tool_call"
-                logging.warning(f"NonStreamTask: tool_calls is empty, No Tool need to call !")
+                logging.warning(f"NonStreamResp: tool_calls is empty, No Tool need to call !")
 
             if finish_reason:
                 finish_content = {"status_type": "finish", "finish_reason": finish_reason}
-                finish_msg_obj = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
-                if finish_msg_obj:
-                    yield format_for_yield(finish_msg_obj)
-
+                finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
+                yield format_for_yield(finish_msg)
         except Exception as e:
-            logging.error(f"NonStreamTask: Error processing non-streaming response: {llm_content}")
-            self.root_span.event(name="error_processing_non_streaming_response", level="ERROR",
-                                 status_message=(f"Error processing non-streaming response: {str(e)}"))
-
-            content = {"role": "system", "status_type": "error", "message": str(e)}
-            err_msg = self.add_response_message(ype="status", content=content,is_llm_message=False)
-            if err_msg:
-                yield format_for_yield(err_msg)
-
-            # Re-raise the same exception (not a new one) to ensure proper error propagation
-            logging.critical(f"NonStreamTask: Re-raising error to stop further processing: {str(e)}")
-            self.root_span.event(name="re_raising_error_to_stop_further_processing", level="CRITICAL",
-                                 status_message=(f"Re-raising error to stop further processing: {str(e)}"))
+            trace = log_trace(e, f"NonStreamResp: Process response llm_content:\n {llm_content}")
+            self.root_span.event(name="non_stream_process_response_error", level="ERROR",
+                                 status_message=f"Process non-streaming response error: {e}",
+                                 metadata={"content": llm_content, "trace": trace})
+
+            content = {"role": "system", "status_type": "error", "message": f"Process non-streaming response error: {e}"}
+            error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
+            yield format_for_yield(error_msg)
+
             raise # Use bare 'raise' to preserve the original exception with its traceback
 
 
@@ -6,6 +6,7 @@ from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable, TypedDict, AsyncGenerator
 
+from xgae.utils import log_trace
 from xgae.utils.json_helpers import safe_json_parse
 from xgae.utils.xml_tool_parser import XMLToolParser
 
@@ -26,9 +27,11 @@ class TaskResponserContext(TypedDict, total=False):
     task_no: int
     model_name: str
     max_xml_tool_calls: int # LLM generate max_xml_tool limit, 0 is no limit
+    use_assistant_chunk_msg: bool
     tool_execution_strategy: ToolExecutionStrategy
     xml_adding_strategy: XmlAddingStrategy
     add_response_msg_func: Callable
+    create_response_msg_func: Callable
     tool_box: XGAToolBox
     task_langfuse: XGATaskLangFuse
 
@@ -37,6 +40,7 @@ class TaskRunContinuousState(TypedDict, total=False):
     accumulated_content: str
     auto_continue_count: int
     auto_continue: bool
+    assistant_msg_sequence: int
 
 
 @dataclass
@@ -66,6 +70,7 @@ class TaskResponseProcessor(ABC):
         task_langfuse = response_context.get("task_langfuse")
         self.root_span = task_langfuse.root_span
         self.add_response_message = response_context.get("add_response_msg_func")
+        self.create_response_message = response_context.get("create_response_msg_func")
 
         self.tool_box = response_context.get("tool_box")
         self.xml_parser = XMLToolParser()
@@ -75,7 +80,7 @@ class TaskResponseProcessor(ABC):
     async def process_response(self,
                                llm_response: AsyncGenerator,
                                prompt_messages: List[Dict[str, Any]],
-                               continuous_state: Optional[TaskRunContinuousState] = None
+                               continuous_state: TaskRunContinuousState
                                ) -> AsyncGenerator[Dict[str, Any], None]:
         pass
 
@@ -119,9 +124,8 @@ class TaskResponseProcessor(ABC):
 
         # Find the earliest occurrence of any registered tool function name
         # Check for available function names
-        #available_functions = self.tool_registry.get_available_functions()
-        available_functions = self.tool_box.get_task_tool_names(self.task_id)
-        for func_name in available_functions:
+        available_func_names = self.tool_box.get_task_tool_names(self.task_id)
+        for func_name in available_func_names:
             # Convert function name to potential tag name (underscore to dash)
             tag_name = func_name.replace('_', '-')
             start_pattern = f'<{tag_name}'
@@ -169,12 +173,11 @@ class TaskResponseProcessor(ABC):
                     break
 
                 pos = max(pos + 1, current_pos)
-
         except Exception as e:
-            logging.error(f"Error extracting XML chunks: {e}")
-            logging.error(f"Content was: {content}")
-            self.root_span.event(name="error_extracting_xml_chunks", level="ERROR",
-                                 status_message=(f"Error extracting XML chunks: {e}"), metadata={"content": content})
+            trace = log_trace(e, f"TaskProcessor extract_xml_chunks: Error extracting XML chunks: {content}")
+            self.root_span.event(name="task_process_extract_xml_chunk_error", level="ERROR",
+                                 status_message=f"Error extracting XML chunks: {e}",
+                                 metadata={"content": content, "trace": trace})
 
         return chunks
 
@@ -193,13 +196,13 @@ class TaskResponseProcessor(ABC):
             parsed_calls = self.xml_parser.parse_content(xml_chunk)
 
             if not parsed_calls:
-                logging.error(f"No tool calls found in XML chunk: {xml_chunk}")
+                logging.error(f"TaskProcessor parse_xml_tool_call: No tool calls found in XML chunk: {xml_chunk}")
                 return None
 
             # Take the first tool call (should only be one per chunk)
             xml_tool_call = parsed_calls[0]
             if not xml_tool_call.function_name:
-                logging.error(f"xml_tool_call function name is empty: {xml_tool_call}")
+                logging.error(f"TaskProcessor parse_xml_tool_call: xml_tool_call function name is empty: {xml_tool_call}")
                 return None
 
             # Convert to the expected format
@@ -217,14 +220,12 @@ class TaskResponseProcessor(ABC):
                 return tool_call, parsing_details
 
             # If not the expected <function_calls><invoke> format, return None
-            logging.error(f"XML chunk does not contain expected <function_calls><invoke> format: {xml_chunk}")
-            return None
-
+            logging.error(f"TaskProcessor parse_xml_tool_call: XML chunk does not contain expected <function_calls><invoke> format: {xml_chunk}")
         except Exception as e:
-            logging.error(f"Error parsing XML chunk: {e}")
-            logging.error(f"XML chunk was: {xml_chunk}")
-            self.root_span.event(name="error_parsing_xml_chunk", level="ERROR",
-                                 status_message=(f"Error parsing XML chunk: {e}"), metadata={"xml_chunk": xml_chunk})
+            trace = log_trace(e, f"TaskProcessor parse_xml_tool_call: Error parsing XML chunk: {xml_chunk}")
+            self.root_span.event(name="task_process_parsing_xml_chunk_error", level="ERROR",
+                                 status_message=f"Error parsing XML chunk: {e}",
+                                 metadata={"xml_chunk": xml_chunk, "trace": trace})
             return None
 
     def _parse_xml_tool_calls(self, content: str) -> List[Dict[str, Any]]:
@@ -234,7 +235,7 @@ class TaskResponseProcessor(ABC):
         List of dictionaries, each containing {'tool_call': ..., 'parsing_details': ...}
         """
         parsed_data = []
-
+        xml_chunk = None
         try:
             xml_chunks = self._extract_xml_chunks(content)
 
@@ -246,64 +247,65 @@ class TaskResponseProcessor(ABC):
                     "tool_call": tool_call,
                     "parsing_details": parsing_details
                 })
-
         except Exception as e:
-            logging.error(f"Error parsing XML tool calls: {e}", exc_info=True)
-            self.root_span.event(name="error_parsing_xml_tool_calls", level="ERROR",
-                                 status_message=(f"Error parsing XML tool calls: {e}"), metadata={"content": content})
+            trace = log_trace(e, f"TaskProcessor parse_xml_tool_calls: Error parsing XML tool calls, xml_chunk: {xml_chunk}")
+            self.root_span.event(name="task_process_parse_xml_tool_calls_error", level="ERROR",
+                                 status_message=f"Error parsing XML tool calls: {e}",
+                                 metadata={"content": xml_chunk, "trace": trace})
 
         return parsed_data
 
 
     async def _execute_tool(self, tool_call: Dict[str, Any]) -> XGAToolResult:
         """Execute a single tool call and return the result."""
-        exec_tool_span = self.root_span.span(name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
+        function_name = tool_call.get("function_name", "empty_function")
+        exec_tool_span = self.root_span.span(name=f"execute_tool.{function_name}", input=tool_call["arguments"])
         try:
-            function_name = tool_call["function_name"]
             arguments = tool_call["arguments"]
-
-            logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
-
             if isinstance(arguments, str):
                 try:
                     arguments = safe_json_parse(arguments)
                 except json.JSONDecodeError:
-                    arguments = {"text": arguments} # @todo modify
+                    logging.warning(f"TaskProcessor execute_tool: Tool '{function_name}' arguments is not dict type, args={arguments}")
+                    arguments = {"text": arguments} # useless
 
             result = None
             available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
             if function_name in available_tool_names:
+                logging.info(f"TaskProcessor execute_tool: Tool '{function_name}' executing, args={arguments}")
                 result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
             else:
-                logging.error(f"Tool function '{function_name}' not found in registry")
+                logging.error(f"TaskProcessor execute_tool: Tool function '{function_name}' not found in toolbox")
                 result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
-            logging.info(f"Tool execution complete: {function_name} -> {result}")
+
+            logging.info(f"TaskProcessor execute_tool: Tool '{function_name}' execution complete, result: {result}")
             exec_tool_span.update(status_message="tool_executed", output=result)
 
             return result
         except Exception as e:
-            logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
+            trace = log_trace(e, f"TaskProcessor execute_tool: Executing tool {function_name}")
 
-            exec_tool_span.update(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
-            return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
+            exec_tool_span.update(status_message="task_process_tool_exec_error", level="ERROR",
+                                  output=f"Error executing tool {function_name}, error: {str(e)}",
+                                  metadata={"trace": trace})
 
-    async def _execute_tools(
-            self,
-            tool_calls: List[Dict[str, Any]],
-            execution_strategy: ToolExecutionStrategy = "sequential"
-    ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
-        logging.info(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}")
+            return XGAToolResult(success=False, output=f"Executing tool {function_name}, error: {str(e)}")
+
+    async def _execute_tools(self, tool_calls: List[Dict[str, Any]],
+                             execution_strategy: ToolExecutionStrategy = "sequential"
+                             ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
+        logging.info(f"TaskProcessor execute_tools: Executing {len(tool_calls)} tools with strategy '{execution_strategy}'")
 
         if execution_strategy == "sequential":
             return await self._execute_tools_sequentially(tool_calls)
         elif execution_strategy == "parallel":
             return await self._execute_tools_in_parallel(tool_calls)
         else:
-            logging.warning(f"Unknown execution strategy: {execution_strategy}, falling back to sequential")
+            logging.warning(f"TaskProcessor execute_tools: Unknown execution strategy '{execution_strategy}', use sequential")
             return await self._execute_tools_sequentially(tool_calls)
 
-    async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[
-        Tuple[Dict[str, Any], XGAToolResult]]:
+    # @todo refact below code
+    async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
         """Execute tool calls sequentially and return results.
 
         This method executes tool calls one after another, waiting for each tool to complete
@@ -317,9 +319,10 @@ class TaskResponseProcessor(ABC):
         """
         if not tool_calls:
            return []
+
        tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
        logging.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
-        self.root_span.event(name="executing_tools_sequentially", level="DEFAULT",
+        self.root_span.event(name="task_process_executing_tools_sequentially", level="DEFAULT",
                             status_message=(f"Executing {len(tool_calls)} tools sequentially: {tool_names}"))
 
        results = []
@@ -341,7 +344,7 @@ class TaskResponseProcessor(ABC):
 
            except Exception as e:
                logging.error(f"Error executing tool {tool_name}: {str(e)}")
-                self.root_span.event(name="error_executing_tool", level="ERROR",
+                self.root_span.event(name="task_process_error_executing_tool", level="ERROR",
                                     status_message=(f"Error executing tool {tool_name}: {str(e)}"))
                error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
                results.append((tool_call, error_result))
@@ -373,7 +376,7 @@ class TaskResponseProcessor(ABC):
            for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
                if isinstance(result, Exception):
                    logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
-                    self.root_span.event(name="error_executing_tool", level="ERROR", status_message=(
+                    self.root_span.event(name="task_process_error_executing_tool", level="ERROR", status_message=(
                        f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
                    # Create error result
                    error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
@@ -388,7 +391,7 @@ class TaskResponseProcessor(ABC):
 
        except Exception as e:
            logging.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
-            self.root_span.event(name="error_in_parallel_tool_execution", level="ERROR",
+            self.root_span.event(name="task_process_error_in_parallel_tool_execution", level="ERROR",
                                 status_message=(f"Error in parallel tool execution: {str(e)}"))
            # Return error results for all tools if the gather itself fails
            return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
@@ -459,7 +462,7 @@ class TaskResponseProcessor(ABC):
            return message_obj # Return the modified message object
        except Exception as e:
            logging.error(f"Error adding tool result: {str(e)}", exc_info=True)
-            self.root_span.event(name="error_adding_tool_result", level="ERROR",
+            self.root_span.event(name="task_process_error_adding_tool_result", level="ERROR",
                                 status_message=(f"Error adding tool result: {str(e)}"),
                                 metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
                                           "assistant_message_id": assistant_message_id,
@@ -479,7 +482,7 @@ class TaskResponseProcessor(ABC):
                return message_obj # Return the full message object
            except Exception as e2:
                logging.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
-                self.root_span.event(name="failed_even_with_fallback_message", level="ERROR",
+                self.root_span.event(name="task_process_failed_even_with_fallback_message", level="ERROR",
                                     status_message=(f"Failed even with fallback message: {str(e2)}"),
                                     metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
                                               "assistant_message_id": assistant_message_id,