xgae-0.1.12-py3-none-any.whl → xgae-0.1.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic; see the registry's release advisory for more details.

@@ -113,15 +113,20 @@ class XGAMcpToolBox(XGAToolBox):
113
113
  async with self._mcp_client.session(server_name) as session:
114
114
  tools = await load_mcp_tools(session)
115
115
  mcp_tool = next((t for t in tools if t.name == tool_name), None)
116
-
116
+ is_general_tool = False
117
117
  if mcp_tool:
118
118
  tool_args = args or {}
119
119
  if server_name == self.GENERAL_MCP_SERVER_NAME:
120
120
  tool_args = dict({"task_id": task_id}, **tool_args)
121
+ is_general_tool = True
121
122
 
122
123
  try:
123
124
  tool_result = await mcp_tool.arun(tool_args)
124
- result = XGAToolResult(success=True, output=str(tool_result))
125
+ if is_general_tool:
126
+ tool_result = json.loads(tool_result)
127
+ result = XGAToolResult(success=tool_result['success'], output=str(tool_result['output']))
128
+ else:
129
+ result = XGAToolResult(success=True, output=str(tool_result))
125
130
  except Exception as e:
126
131
  error = f"Call mcp tool '{tool_name}' error: {str(e)}"
127
132
  logging.error(f"McpToolBox call_tool: {error}")
@@ -195,7 +200,7 @@ if __name__ == "__main__":
195
200
  setup_logging()
196
201
 
197
202
  async def main():
198
- ## Before Run Exec: uv run custom_fault_tools
203
+ ## Before Run Exec: uv run example-fault-tools
199
204
  mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
200
205
  #mcp_tool_box = XGAMcpToolBox()
201
206
 
@@ -2,7 +2,7 @@ import logging
2
2
 
3
3
  from typing import List, Dict, Any, AsyncGenerator, override,Optional
4
4
 
5
- from xgae.utils import handle_error
5
+ from xgae.utils import log_trace
6
6
  from xgae.utils.json_helpers import format_for_yield
7
7
 
8
8
  from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
@@ -61,8 +61,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
61
61
  parsing_details = parsed_xml_item['parsing_details']
62
62
  assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
63
63
 
64
- tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details)
65
- tool_context.result = tool_result
64
+ tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details, tool_result)
66
65
 
67
66
  tool_start_msg = self._add_tool_start_message(tool_context)
68
67
  yield format_for_yield(tool_start_msg)
@@ -88,11 +87,10 @@ class NonStreamTaskResponser(TaskResponseProcessor):
88
87
  finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
89
88
  yield format_for_yield(finish_msg)
90
89
  except Exception as e:
91
- logging.error(f"NonStreamResp: Process response llm_content: {llm_content}")
92
- handle_error(e)
90
+ trace = log_trace(e, f"NonStreamResp: Process response llm_content:\n {llm_content}")
93
91
  self.root_span.event(name="non_stream_process_response_error", level="ERROR",
94
92
  status_message=f"Process non-streaming response error: {e}",
95
- metadata={"content": llm_content})
93
+ metadata={"content": llm_content, "trace": trace})
96
94
 
97
95
  content = {"role": "system", "status_type": "error", "message": f"Process non-streaming response error: {e}"}
98
96
  error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
@@ -6,7 +6,7 @@ from abc import ABC, abstractmethod
6
6
  from dataclasses import dataclass
7
7
  from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable, TypedDict, AsyncGenerator
8
8
 
9
- from xgae.utils import handle_error
9
+ from xgae.utils import log_trace
10
10
  from xgae.utils.json_helpers import safe_json_parse
11
11
  from xgae.utils.xml_tool_parser import XMLToolParser
12
12
 
@@ -29,6 +29,7 @@ class TaskResponserContext(TypedDict, total=False):
29
29
  max_xml_tool_calls: int # LLM generate max_xml_tool limit, 0 is no limit
30
30
  use_assistant_chunk_msg: bool
31
31
  tool_execution_strategy: ToolExecutionStrategy
32
+ tool_execute_on_stream: bool
32
33
  xml_adding_strategy: XmlAddingStrategy
33
34
  add_response_msg_func: Callable
34
35
  create_response_msg_func: Callable
@@ -48,8 +49,8 @@ class ToolExecutionContext:
48
49
  """Context for a tool execution including call details, result, and display info."""
49
50
  tool_call: Dict[str, Any]
50
51
  tool_index: int
51
- result: Optional[XGAToolResult] = None
52
52
  function_name: Optional[str] = None
53
+ result: Optional[XGAToolResult] = None
53
54
  xml_tag_name: Optional[str] = None
54
55
  error: Optional[Exception] = None
55
56
  assistant_message_id: Optional[str] = None
@@ -66,6 +67,7 @@ class TaskResponseProcessor(ABC):
66
67
  self.tool_execution_strategy = self.response_context.get("tool_execution_strategy", "parallel")
67
68
  self.xml_adding_strategy = self.response_context.get("xml_adding_strategy", "user_message")
68
69
  self.max_xml_tool_calls = self.response_context.get("max_xml_tool_calls", 0)
70
+ self.tool_execute_on_stream = response_context.get("tool_execute_on_stream", False)
69
71
 
70
72
  task_langfuse = response_context.get("task_langfuse")
71
73
  self.root_span = task_langfuse.root_span
@@ -174,10 +176,10 @@ class TaskResponseProcessor(ABC):
174
176
 
175
177
  pos = max(pos + 1, current_pos)
176
178
  except Exception as e:
177
- logging.error(f"TaskProcessor extract_xml_chunks: Error extracting XML chunks: {content}")
178
- handle_error(e)
179
+ trace = log_trace(e, f"TaskProcessor extract_xml_chunks: Error extracting XML chunks: {content}")
179
180
  self.root_span.event(name="task_process_extract_xml_chunk_error", level="ERROR",
180
- status_message=(f"Error extracting XML chunks: {e}"), metadata={"content": content})
181
+ status_message=f"Error extracting XML chunks: {e}",
182
+ metadata={"content": content, "trace": trace})
181
183
 
182
184
  return chunks
183
185
 
@@ -197,13 +199,13 @@ class TaskResponseProcessor(ABC):
197
199
 
198
200
  if not parsed_calls:
199
201
  logging.error(f"TaskProcessor parse_xml_tool_call: No tool calls found in XML chunk: {xml_chunk}")
200
- return
202
+ return None
201
203
 
202
204
  # Take the first tool call (should only be one per chunk)
203
205
  xml_tool_call = parsed_calls[0]
204
206
  if not xml_tool_call.function_name:
205
207
  logging.error(f"TaskProcessor parse_xml_tool_call: xml_tool_call function name is empty: {xml_tool_call}")
206
- return
208
+ return None
207
209
 
208
210
  # Convert to the expected format
209
211
  tool_call = {
@@ -222,10 +224,11 @@ class TaskResponseProcessor(ABC):
222
224
  # If not the expected <function_calls><invoke> format, return None
223
225
  logging.error(f"TaskProcessor parse_xml_tool_call: XML chunk does not contain expected <function_calls><invoke> format: {xml_chunk}")
224
226
  except Exception as e:
225
- logging.error(f"TaskProcessor parse_xml_tool_call: Error parsing XML chunk: {xml_chunk}")
226
- handle_error(e)
227
+ trace = log_trace(e, f"TaskProcessor parse_xml_tool_call: Error parsing XML chunk: {xml_chunk}")
227
228
  self.root_span.event(name="task_process_parsing_xml_chunk_error", level="ERROR",
228
- status_message=(f"Error parsing XML chunk: {e}"), metadata={"xml_chunk": xml_chunk})
229
+ status_message=f"Error parsing XML chunk: {e}",
230
+ metadata={"xml_chunk": xml_chunk, "trace": trace})
231
+ return None
229
232
 
230
233
  def _parse_xml_tool_calls(self, content: str) -> List[Dict[str, Any]]:
231
234
  """Parse XML tool calls from content string.
@@ -247,10 +250,10 @@ class TaskResponseProcessor(ABC):
247
250
  "parsing_details": parsing_details
248
251
  })
249
252
  except Exception as e:
250
- logging.warning(f"TaskProcessor parse_xml_tool_calls: Error parsing XML tool calls, xml_chunk: {xml_chunk}")
251
- handle_error(e)
253
+ trace = log_trace(e, f"TaskProcessor parse_xml_tool_calls: Error parsing XML tool calls, xml_chunk: {xml_chunk}")
252
254
  self.root_span.event(name="task_process_parse_xml_tool_calls_error", level="ERROR",
253
- status_message=(f"Error parsing XML tool calls: {e}"), metadata={"content": xml_chunk})
255
+ status_message=f"Error parsing XML tool calls: {e}",
256
+ metadata={"content": xml_chunk, "trace": trace})
254
257
 
255
258
  return parsed_data
256
259
 
@@ -261,313 +264,243 @@ class TaskResponseProcessor(ABC):
261
264
  exec_tool_span = self.root_span.span(name=f"execute_tool.{function_name}", input=tool_call["arguments"])
262
265
  try:
263
266
  arguments = tool_call["arguments"]
264
-
265
- logging.info(f"TaskProcessor execute_tool: Executing tool: {function_name} with arguments: {arguments}")
266
-
267
267
  if isinstance(arguments, str):
268
268
  try:
269
269
  arguments = safe_json_parse(arguments)
270
270
  except json.JSONDecodeError:
271
- arguments = {"text": arguments} # @todo modify
271
+ logging.warning(f"TaskProcessor execute_tool: Tool '{function_name}' arguments is not dict type, args={arguments}")
272
+ arguments = {"text": arguments} # useless
272
273
 
273
274
  result = None
274
275
  available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
275
276
  if function_name in available_tool_names:
277
+ logging.info(f"TaskProcessor execute_tool: Tool '{function_name}' executing, args={arguments}")
276
278
  result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
277
279
  else:
278
280
  logging.error(f"TaskProcessor execute_tool: Tool function '{function_name}' not found in toolbox")
279
281
  result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
280
282
 
281
- logging.info(f"TaskProcessor execute_tool: Tool execution complete: {function_name} -> {result}")
283
+ logging.info(f"TaskProcessor execute_tool: Tool '{function_name}' execution complete, result: {result}")
282
284
  exec_tool_span.update(status_message="tool_executed", output=result)
283
285
 
284
286
  return result
285
287
  except Exception as e:
286
- logging.error(f"TaskProcessor execute_tool: Error executing tool {function_name}")
288
+ trace = log_trace(e, f"TaskProcessor execute_tool: Executing tool {function_name}")
287
289
 
288
290
  exec_tool_span.update(status_message="task_process_tool_exec_error", level="ERROR",
289
- output=f"Error executing tool {function_name}, error: {str(e)}")
291
+ output=f"Error executing tool {function_name}, error: {str(e)}",
292
+ metadata={"trace": trace})
293
+
290
294
  return XGAToolResult(success=False, output=f"Executing tool {function_name}, error: {str(e)}")
291
295
 
292
- async def _execute_tools(
293
- self,
294
- tool_calls: List[Dict[str, Any]],
295
- execution_strategy: ToolExecutionStrategy = "sequential"
296
- ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
297
- logging.info(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}")
298
296
 
297
+ async def _execute_tools(self, tool_calls: List[Dict[str, Any]],
298
+ execution_strategy: ToolExecutionStrategy = "sequential"
299
+ ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
299
300
  if execution_strategy == "sequential":
300
301
  return await self._execute_tools_sequentially(tool_calls)
301
302
  elif execution_strategy == "parallel":
302
303
  return await self._execute_tools_in_parallel(tool_calls)
303
304
  else:
304
- logging.warning(f"Unknown execution strategy: {execution_strategy}, falling back to sequential")
305
+ logging.warning(f"TaskProcessor execute_tools: Unknown execution strategy '{execution_strategy}', use sequential")
305
306
  return await self._execute_tools_sequentially(tool_calls)
306
307
 
307
- async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[
308
- Tuple[Dict[str, Any], XGAToolResult]]:
308
+
309
+ async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
309
310
  """Execute tool calls sequentially and return results.
310
311
 
311
312
  This method executes tool calls one after another, waiting for each tool to complete
312
313
  before starting the next one. This is useful when tools have dependencies on each other.
313
314
 
314
- Args:
315
- tool_calls: List of tool calls to execute
316
-
317
315
  Returns:
318
316
  List of tuples containing the original tool call and its result
319
317
  """
320
318
  if not tool_calls:
319
+ logging.warning("TaskProcessor execute_tools_sequentially: tool_calls is empty")
321
320
  return []
321
+
322
322
  tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
323
- logging.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
324
- self.root_span.event(name="task_process_executing_tools_sequentially", level="DEFAULT",
325
- status_message=(f"Executing {len(tool_calls)} tools sequentially: {tool_names}"))
323
+ tool_num = len(tool_calls)
324
+ if tool_num > 1:
325
+ logging.info(f"TaskProcessor execute_tools_sequentially: Executing {tool_num} tools sequentially: {tool_names}")
326
+ self.root_span.event(name="task_process_execute_tools_sequentially", level="DEFAULT",
327
+ status_message=f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
326
328
 
327
329
  results = []
328
330
  for index, tool_call in enumerate(tool_calls):
329
331
  tool_name = tool_call.get('function_name', 'unknown')
330
- logging.debug(f"Executing tool {index + 1}/{len(tool_calls)}: {tool_name}")
331
-
332
- try:
333
- result = await self._execute_tool(tool_call)
334
- results.append((tool_call, result))
335
- logging.debug(f"Completed tool {tool_name} with success={result.success}")
336
-
337
- # Check if this is a terminating tool (ask or complete)
338
- if tool_name in ['ask', 'complete']:
339
- logging.info(f"Terminating tool '{tool_name}' executed. Stopping further tool execution.")
340
- # self.root_span.event(name="terminating_tool_executed",
341
- # level="DEFAULT", status_message=(f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
342
- break # Stop executing remaining tools
343
-
344
- except Exception as e:
345
- logging.error(f"Error executing tool {tool_name}: {str(e)}")
346
- self.root_span.event(name="task_process_error_executing_tool", level="ERROR",
347
- status_message=(f"Error executing tool {tool_name}: {str(e)}"))
348
- error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
349
- results.append((tool_call, error_result))
350
-
351
- logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
352
- # self.root_span.event(name="sequential_execution_completed", level="DEFAULT",
353
- # status_message=(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
332
+ logging.info(f"TaskProcessor execute_tools_sequentially: Executing tool '{tool_name}', sequence={index + 1}/{tool_num}")
333
+ result = await self._execute_tool(tool_call)
334
+ results.append((tool_call, result))
335
+
336
+ # Check if this is a terminating tool (ask or complete)
337
+ if tool_name in ['ask', 'complete']:
338
+ if len(results) < tool_num:
339
+ logging.info(f"TaskProcessor execute_tools_sequentially: Terminating tool '{tool_name}' executed, Stopping further tool execution.")
340
+ self.root_span.event(name="task_process_terminate_tool_executed", level="DEFAULT",
341
+ status_message=f"Terminating tool '{tool_name}' executed, Stopping further tool execution.")
342
+ break
343
+
344
+ logging.info(f"TaskProcessor execute_tools_sequentially: Execution completed for {len(results)} tools, total {tool_num} tools)")
354
345
  return results
355
346
 
356
347
 
357
348
  async def _execute_tools_in_parallel(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
349
+ """Execute tool calls in parallel and return results.
350
+
351
+ This method executes all tool calls simultaneously using asyncio.gather, which
352
+ can significantly improve performance when executing multiple independent tools.
353
+
354
+ Returns:
355
+ List of tuples containing the original tool call and its result
356
+ """
358
357
  if not tool_calls:
358
+ logging.warning("TaskProcessor execute_tools_in_parallel: tool_calls is empty")
359
359
  return []
360
360
 
361
- try:
362
- tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
363
- logging.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
364
- # self.root_span.event(name="executing_tools_in_parallel", level="DEFAULT",
365
- # status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
366
-
367
- # Create tasks for all tool calls
368
- tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
369
-
370
- # Execute all tasks concurrently with error handling
371
- results = await asyncio.gather(*tasks, return_exceptions=True)
372
-
373
- # Process results and handle any exceptions
374
- processed_results = []
375
- for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
376
- if isinstance(result, Exception):
377
- logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
378
- self.root_span.event(name="task_process_error_executing_tool", level="ERROR", status_message=(
379
- f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
380
- # Create error result
381
- error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
382
- processed_results.append((tool_call, error_result))
383
- else:
384
- processed_results.append((tool_call, result))
385
-
386
- logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
387
- # self.root_span.event(name="parallel_execution_completed", level="DEFAULT",
388
- # status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
389
- return processed_results
361
+ tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
362
+ tool_num = len(tool_calls)
363
+ if tool_num > 1:
364
+ logging.info(f"TaskProcessor execute_tools_in_parallel: Executing {tool_num} tools sequentially: {tool_names}")
365
+ self.root_span.event(name="task_process_execute_tools_parallel", level="DEFAULT",
366
+ status_message=f"Executing {len(tool_calls)} tools parallelly: {tool_names}")
390
367
 
391
- except Exception as e:
392
- logging.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
393
- self.root_span.event(name="task_process_error_in_parallel_tool_execution", level="ERROR",
394
- status_message=(f"Error in parallel tool execution: {str(e)}"))
395
- # Return error results for all tools if the gather itself fails
396
- return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
397
- for tool_call in tool_calls]
398
-
399
- def _add_tool_messsage(
400
- self,
401
- tool_call: Dict[str, Any],
402
- result: XGAToolResult,
403
- strategy: XmlAddingStrategy = "assistant_message",
404
- assistant_message_id: Optional[str] = None,
405
- parsing_details: Optional[Dict[str, Any]] = None
406
- ) -> Optional[Dict[str, Any]]: # Return the full message object
407
- try:
408
- message_obj = None # Initialize message_obj
409
-
410
- # Create metadata with assistant_message_id if provided
411
- metadata = {}
412
- if assistant_message_id:
413
- metadata["assistant_message_id"] = assistant_message_id
414
- logging.info(f"Linking tool result to assistant message: {assistant_message_id}")
415
-
416
- # --- Add parsing details to metadata if available ---
417
- if parsing_details:
418
- metadata["parsing_details"] = parsing_details
419
- logging.info("Adding parsing_details to tool result metadata")
420
-
421
- # For XML and other non-native tools, use the new structured format
422
- # Determine message role based on strategy
423
- result_role = "user" if strategy == "user_message" else "assistant"
424
-
425
- # Create two versions of the structured result
426
- # 1. Rich version for the frontend
427
- structured_result_for_frontend = self._create_structured_tool_result(tool_call, result, parsing_details,
428
- for_llm=False)
429
- # 2. Concise version for the LLM
430
- structured_result_for_llm = self._create_structured_tool_result(tool_call, result, parsing_details,
431
- for_llm=True)
432
-
433
- # Add the message with the appropriate role to the conversation history
434
- # This allows the LLM to see the tool result in subsequent interactions
435
- result_message_for_llm = {
436
- "role": result_role,
437
- "content": json.dumps(structured_result_for_llm)
438
- }
368
+ # Execute all tasks concurrently with error handling
369
+ tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
370
+ results = await asyncio.gather(*tasks, return_exceptions=True)
439
371
 
440
- # Add rich content to metadata for frontend use
441
- if metadata is None:
442
- metadata = {}
443
- metadata['frontend_content'] = structured_result_for_frontend
444
-
445
- message_obj = self.add_response_message(
446
- type="tool",
447
- content=result_message_for_llm, # Save the LLM-friendly version
448
- is_llm_message=True,
449
- metadata=metadata
450
- )
451
-
452
- # If the message was saved, modify it in-memory for the frontend before returning
453
- if message_obj:
454
- # The frontend expects the rich content in the 'content' field.
455
- # The DB has the rich content in metadata.frontend_content.
456
- # Let's reconstruct the message for yielding.
457
- message_for_yield = message_obj.copy()
458
- message_for_yield['content'] = structured_result_for_frontend
459
- return message_for_yield
460
-
461
- return message_obj # Return the modified message object
462
- except Exception as e:
463
- logging.error(f"Error adding tool result: {str(e)}", exc_info=True)
464
- self.root_span.event(name="task_process_error_adding_tool_result", level="ERROR",
465
- status_message=(f"Error adding tool result: {str(e)}"),
466
- metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
467
- "assistant_message_id": assistant_message_id,
468
- "parsing_details": parsing_details})
469
- # Fallback to a simple message
470
- try:
471
- fallback_message = {
472
- "role": "user",
473
- "content": str(result)
474
- }
475
- message_obj = self.add_response_message(
476
- type="tool",
477
- content=fallback_message,
478
- is_llm_message=True,
479
- metadata={"assistant_message_id": assistant_message_id} if assistant_message_id else {}
480
- )
481
- return message_obj # Return the full message object
482
- except Exception as e2:
483
- logging.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
484
- self.root_span.event(name="task_process_failed_even_with_fallback_message", level="ERROR",
485
- status_message=(f"Failed even with fallback message: {str(e2)}"),
486
- metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
487
- "assistant_message_id": assistant_message_id,
488
- "parsing_details": parsing_details})
489
- return None # Return None on error
490
-
491
- def _create_structured_tool_result(self, tool_call: Dict[str, Any], result: XGAToolResult,
492
- parsing_details: Optional[Dict[str, Any]] = None, for_llm: bool = False):
372
+ processed_results = []
373
+ for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
374
+ processed_results.append((tool_call, result))
375
+
376
+ logging.info(f"TaskProcessor execute_tools_in_parallel: Execution completed for {len(results)} tools, total {tool_num} tools)")
377
+ return processed_results
378
+
379
+
380
+ def _add_tool_messsage(self,
381
+ tool_call: Dict[str, Any],
382
+ result: XGAToolResult,
383
+ strategy: XmlAddingStrategy = "assistant_message",
384
+ assistant_message_id: Optional[str] = None,
385
+ parsing_details: Optional[Dict[str, Any]] = None
386
+ ) -> Optional[Dict[str, Any]]: # Return the full message object
387
+ tool_message = None
388
+
389
+ metadata = {}
390
+ if assistant_message_id:
391
+ metadata["assistant_message_id"] = assistant_message_id
392
+
393
+ if parsing_details:
394
+ metadata["parsing_details"] = parsing_details
395
+
396
+ role = "user" if strategy == "user_message" else "assistant"
397
+
398
+ # Create two versions of the structured result
399
+ # Rich version for the frontend
400
+ result_for_frontend = self._create_structured_tool_result(tool_call, result, parsing_details, for_llm=False)
401
+ # Concise version for the LLM
402
+ result_for_llm = self._create_structured_tool_result(tool_call, result, parsing_details, for_llm=True)
403
+
404
+ # Add the message with the appropriate role to the conversation history
405
+ # This allows the LLM to see the tool result in subsequent interactions
406
+ content = {
407
+ "role": role,
408
+ "content": json.dumps(result_for_llm)
409
+ }
410
+
411
+ metadata['frontend_content'] = result_for_frontend
412
+
413
+ tool_message = self.add_response_message(
414
+ type="tool",
415
+ content=content,
416
+ is_llm_message=True,
417
+ metadata=metadata
418
+ )
419
+
420
+ # Let's reconstruct the message for yielding.
421
+ yield_message = tool_message.copy()
422
+ yield_message['content'] = result_for_frontend
423
+
424
+ return yield_message
425
+
426
+
427
+ def _create_structured_tool_result(self,
428
+ tool_call: Dict[str, Any],
429
+ result: XGAToolResult,
430
+ parsing_details: Optional[Dict[str, Any]] = None,
431
+ for_llm: bool = False) -> Dict[str, Any]:
493
432
  function_name = tool_call.get("function_name", "unknown")
494
433
  xml_tag_name = tool_call.get("xml_tag_name")
495
434
  arguments = tool_call.get("arguments", {})
496
435
  tool_call_id = tool_call.get("id")
497
436
 
498
437
  # Process the output - if it's a JSON string, parse it back to an object
499
- output = result.output if hasattr(result, 'output') else str(result)
500
- if isinstance(output, str):
501
- try:
502
- # Try to parse as JSON to provide structured data to frontend
503
- parsed_output = safe_json_parse(output)
504
- # If parsing succeeded and we got a dict/list, use the parsed version
505
- if isinstance(parsed_output, (dict, list)):
506
- output = parsed_output
507
- # Otherwise keep the original string
508
- except Exception:
509
- # If parsing fails, keep the original string
510
- pass
438
+ output = result.output
439
+ parsed_output = safe_json_parse(output)
440
+ if isinstance(parsed_output, (dict, list)):
441
+ output = parsed_output
511
442
 
512
443
  output_to_use = output
513
444
  # If this is for the LLM and it's an edit_file tool, create a concise output
514
445
  if for_llm and function_name == 'edit_file' and isinstance(output, dict):
515
446
  # The frontend needs original_content and updated_content to render diffs.
516
447
  # The concise version for the LLM was causing issues.
517
- # We will now pass the full output, and rely on the ContextManager to truncate if needed.
448
+ # @todo We will now pass the full output, and rely on the ContextManager to truncate if needed.
518
449
  output_to_use = output
519
450
 
520
- # Create the structured result
521
- structured_result_v1 = {
451
+ structured_result = {
522
452
  "tool_execution": {
523
453
  "function_name": function_name,
524
454
  "xml_tag_name": xml_tag_name,
525
455
  "tool_call_id": tool_call_id,
526
456
  "arguments": arguments,
527
457
  "result": {
528
- "success": result.success if hasattr(result, 'success') else True,
529
- "output": output_to_use, # This will be either rich or concise based on `for_llm`
530
- "error": getattr(result, 'error', None) if hasattr(result, 'error') else None
458
+ "success": result.success,
459
+ "output": output_to_use,
460
+ "error": None if result.success else result.output
531
461
  },
532
462
  }
533
463
  }
534
464
 
535
- return structured_result_v1
465
+ return structured_result
466
+
536
467
 
537
- def _create_tool_context(self, tool_call: Dict[str, Any], tool_index: int,
468
+ def _create_tool_context(self,
469
+ tool_call: Dict[str, Any],
470
+ tool_index: int,
538
471
  assistant_message_id: Optional[str] = None,
539
- parsing_details: Optional[Dict[str, Any]] = None) -> ToolExecutionContext:
472
+ parsing_details: Optional[Dict[str, Any]] = None,
473
+ result: Optional[XGAToolResult] = None,
474
+ ) -> ToolExecutionContext:
540
475
  """Create a tool execution context with display name and parsing details populated."""
541
- context = ToolExecutionContext(
476
+ return ToolExecutionContext(
477
+ function_name=tool_call.get("function_name"),
542
478
  tool_call=tool_call,
543
479
  tool_index=tool_index,
544
480
  assistant_message_id=assistant_message_id,
545
- parsing_details=parsing_details
481
+ parsing_details=parsing_details,
482
+ xml_tag_name=tool_call.get("xml_tag_name"),
483
+ result=result,
546
484
  )
547
485
 
548
- # Set function_name and xml_tag_name fields
549
- context.xml_tag_name = tool_call["xml_tag_name"]
550
- context.function_name = tool_call["function_name"]
551
-
552
- return context
553
486
 
554
487
  def _add_tool_start_message(self, context: ToolExecutionContext) -> Optional[Dict[str, Any]]:
555
488
  """Formats, saves, and returns a tool started status message."""
556
489
  tool_name = context.xml_tag_name or context.function_name
557
490
  content = {
558
- "role": "assistant", "status_type": "tool_started",
559
- "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
560
- "message": f"Starting execution of {tool_name}", "tool_index": context.tool_index # Include tool_call ID if native
491
+ "status_type": "tool_started",
492
+ "role": "assistant",
493
+ "function_name": context.function_name,
494
+ "xml_tag_name": context.xml_tag_name,
495
+ "message": f"Starting execution of {tool_name}",
496
+ "tool_index": context.tool_index
561
497
  }
562
498
 
563
- return self.add_response_message(
564
- type="status", content=content, is_llm_message=False
565
- )
499
+ return self.add_response_message(type="status", content=content, is_llm_message=False)
566
500
 
567
501
  def _add_tool_completed_message(self, context: ToolExecutionContext, tool_message_id: Optional[str]) -> Optional[Dict[str, Any]]:
568
502
  """Formats, saves, and returns a tool completed/failed status message."""
569
503
  if not context.result:
570
- # Delegate to error saving if result is missing (e.g., execution failed)
571
504
  return self._add_tool_error_message(context)
572
505
 
573
506
  tool_name = context.xml_tag_name or context.function_name
@@ -575,42 +508,38 @@ class TaskResponseProcessor(ABC):
575
508
  message_text = f"Tool {tool_name} {'completed successfully' if context.result.success else 'failed'}"
576
509
 
577
510
  content = {
578
- "role": "assistant", "status_type": status_type,
579
- "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
580
- "message": message_text, "tool_index": context.tool_index,
511
+ "status_type": status_type,
512
+ "role": "assistant",
513
+ "function_name": context.function_name,
514
+ "xml_tag_name": context.xml_tag_name,
515
+ "message": message_text,
516
+ "tool_index": context.tool_index,
581
517
  "tool_call_id": context.tool_call.get("id")
582
518
  }
519
+
583
520
  metadata = {}
584
521
  # Add the *actual* tool result message ID to the metadata if available and successful
585
522
  if context.result.success and tool_message_id:
586
523
  metadata["linked_tool_result_message_id"] = tool_message_id
587
524
 
588
- # <<< ADDED: Signal if this is a terminating tool >>>
589
525
  if context.function_name in ['ask', 'complete']:
590
526
  metadata["agent_should_terminate"] = "true"
591
- logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
592
- # self.root_span.event(name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
593
- # f"Marking tool status for '{context.function_name}' with termination signal."))
594
- # <<< END ADDED >>>
595
527
 
596
- return self.add_response_message(
597
- type="status", content=content, is_llm_message=False, metadata=metadata
598
- )
528
+ return self.add_response_message(type="status", content=content, is_llm_message=False, metadata=metadata)
599
529
 
600
530
  def _add_tool_error_message(self, context: ToolExecutionContext) -> Optional[Dict[str, Any]]:
601
531
  """Formats, saves, and returns a tool error status message."""
602
532
  error_msg = str(context.error) if context.error else "Unknown error during tool execution"
603
533
  tool_name = context.xml_tag_name or context.function_name
604
534
  content = {
605
- "role": "assistant", "status_type": "tool_error",
606
- "function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
535
+ "status_type": "tool_error",
536
+ "role": "assistant",
537
+ "function_name": context.function_name,
538
+ "xml_tag_name": context.xml_tag_name,
607
539
  "message": f"Error executing tool {tool_name}: {error_msg}",
608
540
  "tool_index": context.tool_index,
609
541
  "tool_call_id": context.tool_call.get("id")
610
542
  }
611
543
 
612
- # Save the status message with is_llm_message=False
613
- return self.add_response_message(
614
- type="status", content=content, is_llm_message=False
615
- )
544
+ return self.add_response_message(type="status", content=content, is_llm_message=False)
616
545
 
@@ -1,9 +1,8 @@
1
1
  import logging
2
-
2
+ import asyncio
3
3
  from typing import List, Dict, Any, Optional, AsyncGenerator, override
4
- from importlib_metadata import metadata
5
4
 
6
- from xgae.utils import handle_error
5
+ from xgae.utils import log_trace
7
6
  from xgae.utils.json_helpers import format_for_yield
8
7
  from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
9
8
 
@@ -22,14 +21,21 @@ class StreamTaskResponser(TaskResponseProcessor):
22
21
  auto_continue_count = continuous_state.get('auto_continue_count', 0)
23
22
  can_auto_continue = continuous_state.get("auto_continue", False)
24
23
  use_assistant_chunk_msg = self.response_context.get("use_assistant_chunk_msg")
25
-
24
+
26
25
  finish_reason = None
27
26
  should_auto_continue = False
28
27
  sequence = continuous_state.get('assistant_msg_sequence', 0)
29
-
28
+
29
+ pending_tool_executions = []
30
+ yielded_tool_indices = set() # Track which tool statuses have been yielded
31
+ tool_results_buffer = [] # Store (tool_call, result, tool_index, context)
32
+ tool_index = 0
33
+ current_xml_content = accumulated_content # Track XML content for streaming detection
34
+
35
+ logging.info(f"=== StreamResp:tool_execute_on_stream={self.tool_execute_on_stream}, auto_continue_count={auto_continue_count}, accumulated_content_len={len(accumulated_content)}")
30
36
  try:
31
37
  async for llm_chunk in llm_response:
32
- if hasattr(llm_chunk, 'choices') and llm_chunk.choices and hasattr(llm_chunk.choices[0], 'finish_reason'):
38
+ if hasattr(llm_chunk, 'choices') and llm_chunk.choices and hasattr(llm_chunk.choices[0],'finish_reason'):
33
39
  if llm_chunk.choices[0].finish_reason:
34
40
  finish_reason = llm_chunk.choices[0].finish_reason
35
41
  logging.info(f"StreamResp:LLM chunk response finish_reason={finish_reason}")
@@ -40,24 +46,71 @@ class StreamTaskResponser(TaskResponseProcessor):
40
46
  if llm_chunk_msg and hasattr(llm_chunk_msg, 'content') and llm_chunk_msg.content:
41
47
  chunk_content = llm_chunk_msg.content
42
48
  accumulated_content += chunk_content
49
+ current_xml_content += chunk_content #Track streaming XML content
43
50
 
44
51
  xml_tool_call_count = len(self._extract_xml_chunks(accumulated_content))
45
52
  if self.max_xml_tool_calls <= 0 or xml_tool_call_count < self.max_xml_tool_calls:
46
53
  if use_assistant_chunk_msg:
47
54
  message_data = {"role": "assistant", "content": chunk_content}
48
55
  metadata = {"sequence": sequence}
49
- assistant_chunk_msg = self.create_response_message(type="assistant_chunk", content=message_data,
50
- is_llm_message=True, metadata=metadata)
56
+ assistant_chunk_msg = self.create_response_message(type="assistant_chunk",content=message_data,
57
+ is_llm_message=True,metadata=metadata)
51
58
  yield assistant_chunk_msg
52
59
 
60
+ #Process XML tool calls during streaming
61
+ if self.tool_execute_on_stream:
62
+ xml_chunks = self._extract_xml_chunks(current_xml_content)
63
+ for xml_chunk in xml_chunks:
64
+ current_xml_content = current_xml_content.replace(xml_chunk, "", 1)
65
+ result = self._parse_xml_tool_call(xml_chunk)
66
+ if result:
67
+ tool_call, parsing_details = result
68
+
69
+ # Create tool context for streaming execution
70
+ tool_context = self._create_tool_context(tool_call, tool_index, None, parsing_details)
71
+
72
+ # Yield tool start status immediately
73
+ tool_start_msg = self._add_tool_start_message(tool_context)
74
+ if tool_start_msg:
75
+ yield format_for_yield(tool_start_msg)
76
+ yielded_tool_indices.add(tool_index)
77
+
78
+ # Create async execution task
79
+ execution_task = asyncio.create_task(self._execute_tool(tool_call))
80
+ pending_tool_executions.append({"task": execution_task,"tool_call": tool_call,"tool_index": tool_index,
81
+ "context": tool_context,"parsing_details": parsing_details})
82
+ tool_index += 1
83
+
53
84
  sequence += 1
54
85
  else:
55
86
  finish_reason = "xml_tool_limit_reached"
56
87
  break
57
88
 
58
- if len(accumulated_content) == 0:
89
+ if len(accumulated_content) == 0:
59
90
  logging.warning(f"StreamResp: LLM response_message content is empty")
60
91
 
92
+ # Wait for pending tool executions from streaming phase
93
+ if pending_tool_executions:
94
+ logging.info(f"Waiting for {len(pending_tool_executions)} pending streamed tool executions")
95
+
96
+ pending_tasks = [execution["task"] for execution in pending_tool_executions]
97
+ done, _ = await asyncio.wait(pending_tasks)
98
+
99
+ for execution in pending_tool_executions:
100
+ tool_idx = execution.get("tool_index", -1)
101
+ context = execution["context"]
102
+
103
+ try:
104
+ if execution["task"].done():
105
+ result = execution["task"].result()
106
+ context.result = result
107
+ tool_results_buffer.append((execution["tool_call"],result,tool_idx,context))
108
+ else:
109
+ logging.warning(f"Task for tool index {tool_idx} not done after wait.")
110
+ except Exception as e:
111
+ logging.error(f"Error getting result for pending tool execution {tool_idx}: {str(e)}")
112
+ context.error = e
113
+
61
114
  if finish_reason == "xml_tool_limit_reached":
62
115
  xml_chunks = self._extract_xml_chunks(accumulated_content)
63
116
  if len(xml_chunks) > self.max_xml_tool_calls:
@@ -66,51 +119,109 @@ class StreamTaskResponser(TaskResponseProcessor):
66
119
  last_chunk = limited_chunks[-1]
67
120
  last_chunk_pos = accumulated_content.find(last_chunk) + len(last_chunk)
68
121
  accumulated_content = accumulated_content[:last_chunk_pos]
69
- parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)
70
122
 
123
+ parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)
71
124
  should_auto_continue = (can_auto_continue and finish_reason == 'length')
72
125
 
73
- self.root_span.event(name=f"stream_processor_start[{self.task_no}]({auto_continue_count})",level="DEFAULT",
126
+ self.root_span.event(name=f"stream_processor_start[{self.task_no}]({auto_continue_count})", level="DEFAULT",
74
127
  status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}, "
75
128
  f"parsed_xml_data_len={len(parsed_xml_data)}, accumulated_content={len(accumulated_content)}, "
76
- f"should_auto_continue={should_auto_continue}")
129
+ f"should_auto_continue={should_auto_continue}, pending_executions={len(pending_tool_executions)}")
77
130
 
78
131
  assistant_msg = None
79
132
  if accumulated_content and not should_auto_continue:
80
133
  message_data = {"role": "assistant", "content": accumulated_content}
81
- assistant_msg = self.add_response_message(type="assistant", content=message_data,
82
- is_llm_message=True)
134
+ assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
83
135
  yield assistant_msg
84
136
 
137
+ # Process results from both streaming and non-streaming executions
85
138
  tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
86
- if len(tool_calls_to_execute) > 0:
87
- tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
88
139
 
89
- tool_index = 0
90
- for i, (returned_tool_call, tool_result) in enumerate(tool_results):
91
- parsed_xml_item = parsed_xml_data[i]
92
- tool_call = parsed_xml_item['tool_call']
93
- parsing_details = parsed_xml_item['parsing_details']
94
- assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
140
+ # Update assistant_message_id for streaming tool contexts
141
+ assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
142
+ for execution in pending_tool_executions:
143
+ if not execution["context"].assistant_message_id:
144
+ execution["context"].assistant_message_id = assistant_msg_id
95
145
 
96
- tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details)
97
- tool_context.result = tool_result
146
+ if len(tool_calls_to_execute) > 0:
147
+ if self.tool_execute_on_stream:
148
+ # Handle results from streaming executions + any remaining tools
149
+ remaining_tools = []
150
+ streamed_tool_indices = set()
151
+
152
+ # Identify which tools were already executed during streaming by index
153
+ for execution in pending_tool_executions:
154
+ streamed_tool_indices.add(execution["tool_index"])
155
+
156
+ # Find remaining tools that weren't executed during streaming
157
+ for i, parsed_item in enumerate(parsed_xml_data):
158
+ tool_call = parsed_item['tool_call']
159
+ tool_identifier = (tool_call.get('function_name', ''), str(tool_call.get('arguments', {})))
160
+
161
+ # Check if this tool was already executed during streaming
162
+ already_executed = False
163
+ for execution in pending_tool_executions:
164
+ exec_tool_call = execution["tool_call"]
165
+ exec_identifier = (exec_tool_call.get('function_name', ''),str(exec_tool_call.get('arguments', {})))
166
+ if tool_identifier == exec_identifier:
167
+ already_executed = True
168
+ break
169
+
170
+ if not already_executed:
171
+ remaining_tools.append((parsed_item['tool_call'], parsed_item['parsing_details'], tool_index))
172
+ tool_index += 1
173
+
174
+ # Execute remaining tools if any
175
+ if remaining_tools:
176
+ for tool_call, parsing_details, t_idx in remaining_tools:
177
+ tool_context = self._create_tool_context(tool_call, t_idx, assistant_msg_id,parsing_details)
178
+
179
+ tool_start_msg = self._add_tool_start_message(tool_context)
180
+ yield format_for_yield(tool_start_msg)
181
+
182
+ result = await self._execute_tool(tool_call)
183
+ tool_context.result = result
184
+ tool_results_buffer.append((tool_call, result, t_idx, tool_context))
185
+
186
+ # Process all tool results
187
+ for tool_call, result, t_idx, context in tool_results_buffer:
188
+ tool_message = self._add_tool_messsage(tool_call, result, self.xml_adding_strategy,assistant_msg_id,
189
+ getattr(context, 'parsing_details', None))
190
+
191
+ tool_completed_msg = self._add_tool_completed_message(context,tool_message['message_id'] if tool_message else None)
192
+ yield format_for_yield(tool_completed_msg)
193
+
194
+ if tool_message:
195
+ yield format_for_yield(tool_message)
196
+
197
+ if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
198
+ finish_reason = "completed"
199
+ break
200
+ else: # non-streaming execution
201
+ tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
202
+ tool_index = 0
203
+ for i, (returned_tool_call, tool_result) in enumerate(tool_results):
204
+ parsed_xml_item = parsed_xml_data[i]
205
+ tool_call = parsed_xml_item['tool_call']
206
+ parsing_details = parsed_xml_item['parsing_details']
98
207
 
99
- tool_start_msg = self._add_tool_start_message(tool_context)
100
- yield format_for_yield(tool_start_msg)
208
+ tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id,parsing_details, tool_result)
101
209
 
102
- tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy, assistant_msg_id, parsing_details)
210
+ tool_start_msg = self._add_tool_start_message(tool_context)
211
+ yield format_for_yield(tool_start_msg)
103
212
 
104
- tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
105
- yield format_for_yield(tool_completed_msg)
213
+ tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy,assistant_msg_id, parsing_details)
106
214
 
107
- yield format_for_yield(tool_message)
215
+ tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
216
+ yield format_for_yield(tool_completed_msg)
108
217
 
109
- if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
110
- finish_reason = "completed"
111
- break
218
+ yield format_for_yield(tool_message)
219
+
220
+ if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
221
+ finish_reason = "completed"
222
+ break
112
223
 
113
- tool_index += 1
224
+ tool_index += 1
114
225
  else:
115
226
  finish_reason = "non_tool_call"
116
227
  logging.warning(f"StreamResp: tool_calls is empty, No Tool need to call !")
@@ -120,11 +231,10 @@ class StreamTaskResponser(TaskResponseProcessor):
120
231
  finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
121
232
  yield format_for_yield(finish_msg)
122
233
  except Exception as e:
123
- logging.error(f"StreamResp: Process response llm_content: {accumulated_content}")
124
- handle_error(e)
234
+ trace = log_trace(e, f"StreamResp: Process response accumulated_content:\n {accumulated_content}")
125
235
  self.root_span.event(name="stream_response_process_error", level="ERROR",
126
236
  status_message=f"Process streaming response error: {e}",
127
- metadata={"content": accumulated_content})
237
+ metadata={"content": accumulated_content, "trace": trace})
128
238
 
129
239
  content = {"role": "system", "status_type": "error", "message": f"Process streaming response error: {e}"}
130
240
  error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
@@ -135,4 +245,5 @@ class StreamTaskResponser(TaskResponseProcessor):
135
245
  if should_auto_continue:
136
246
  continuous_state['accumulated_content'] = accumulated_content
137
247
  continuous_state['assistant_msg_sequence'] = sequence
138
- logging.warning(f"StreamResp: Updated continuous state for auto-continue with {len(accumulated_content)} chars")
248
+ logging.warning(
249
+ f"StreamResp: Updated continuous state for auto-continue with {len(accumulated_content)} chars")
@@ -5,7 +5,7 @@ import os
5
5
  from typing import List, Any, Dict, Optional, AsyncGenerator, Union, Literal
6
6
  from uuid import uuid4
7
7
 
8
- from xgae.utils import handle_error, to_bool
8
+ from xgae.utils import log_trace, to_bool
9
9
  from xgae.utils.llm_client import LLMClient, LLMConfig
10
10
  from xgae.utils.json_helpers import format_for_yield
11
11
 
@@ -171,20 +171,19 @@ class XGATaskEngine:
171
171
  update_continuous_state(auto_continue_count, auto_continue)
172
172
  logging.info(f"TaskEngine run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{self.max_auto_run})")
173
173
  except Exception as parse_error:
174
- logging.error(f"TaskEngine run_task_auto: Parse chunk error, chunk: {chunk}")
175
- handle_error(parse_error)
174
+ trace = log_trace(parse_error,f"TaskEngine run_task_auto: Parse chunk error, chunk: {chunk}")
176
175
  self.task_langfuse.root_span.event(name="engine_parse_chunk_error", level="ERROR",
177
- status_message=(f"Task Engine parse chunk error: {parse_error}"),
178
- metadata={"content": chunk})
176
+ status_message=f"Task Engine parse chunk error: {parse_error}",
177
+ metadata={"content": chunk, "trace": trace})
179
178
 
180
179
  content = {"role": "system", "status_type": "error", "message": "Parse response chunk Error"}
181
180
  error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
182
181
  yield format_for_yield(error_msg)
183
182
  except Exception as run_error:
184
- logging.error(f"TaskEngine run_task_auto: Call task_run_once error: {run_error}")
185
- handle_error(run_error)
183
+ trace = log_trace(run_error, "TaskEngine run_task_auto: Call task_run_once")
186
184
  self.task_langfuse.root_span.event(name="engine_task_run_once_error", level="ERROR",
187
- status_message=(f"Call task_run_once error: {run_error}"))
185
+ status_message=f"Call task_run_once error: {run_error}",
186
+ metadata={"trace": trace})
188
187
 
189
188
  content = {"role": "system", "status_type": "error", "message": "Call run_task_once error"}
190
189
  error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
@@ -268,11 +267,10 @@ class XGATaskEngine:
268
267
  logging.warning(f"❌ FINAL_RESULT: LLM Result is EMPTY, finish_reason={finish_reason}")
269
268
  final_result = XGATaskResult(type="error", content="LLM has no answer")
270
269
  except Exception as e:
271
- logging.error(f"TaskEngine parse_final_result: Parse message chunk error, chunk: {chunk}")
272
- handle_error(e)
270
+ trace = log_trace(e, f"TaskEngine parse_final_result: Parse message chunk error, chunk: {chunk}")
273
271
  self.task_langfuse.root_span.event(name="engine_parse_final_result_error", level="ERROR",
274
- status_message=(f"Task Engine parse final result error: {e}"),
275
- metadata={"content": chunk})
272
+ status_message=f"Task Engine parse final result error: {e}",
273
+ metadata={"content": chunk, "trace": trace})
276
274
 
277
275
  final_result = XGATaskResult(type="error", content="Parse final result failed!")
278
276
 
@@ -321,8 +319,7 @@ class XGATaskEngine:
321
319
  _content = json.loads(content)
322
320
  response_llm_contents.append(_content)
323
321
  except json.JSONDecodeError as e:
324
- logging.error(f"TaskEngine get_history_llm_messages: Failed to decode json, content: {content}")
325
- handle_error(e)
322
+ pass
326
323
  else:
327
324
  response_llm_contents.append(content)
328
325
 
@@ -347,7 +344,8 @@ class XGATaskEngine:
347
344
  "model_name": self.model_name,
348
345
  "max_xml_tool_calls": 0,
349
346
  "use_assistant_chunk_msg": self.use_assistant_chunk_msg,
350
- "tool_execution_strategy": "parallel" if self.tool_exec_parallel else "sequential", # ,
347
+ "tool_execution_strategy": "parallel" if self.tool_exec_parallel else "sequential",
348
+ "tool_execute_on_stream": False,
351
349
  "xml_adding_strategy": "user_message",
352
350
  "add_response_msg_func": self.add_response_message,
353
351
  "create_response_msg_func": self.create_response_message,
@@ -386,10 +384,9 @@ class XGATaskEngine:
386
384
  logging.debug(f"TASK_RESP_CHUNK[{auto_count}]<{chunk_type}{prefix}> content: {pretty_content}")
387
385
  else:
388
386
  logging.info(f"TASK_RESP_CHUNK[{auto_count}]<{chunk_type}{prefix}> content: {pretty_content}")
389
-
390
387
  except Exception as e:
391
- logging.error(f"TaskEngine logging_reponse_chunk: Decorate chunk log error, chunk: {chunk}")
392
- handle_error(e)
388
+ logging.error(f"TaskEngine logging_reponse_chunk: Decorate chunk={chunk}, error: {e}")
389
+
393
390
 
394
391
 
395
392
  if __name__ == "__main__":
@@ -400,7 +397,7 @@ if __name__ == "__main__":
400
397
  setup_logging()
401
398
 
402
399
  async def main():
403
- # Before Run Exec: uv run custom_fault_tools
400
+ # Before Run Exec: uv run example-fault-tools
404
401
  tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
405
402
  system_prompt = read_file("templates/example/fault_user_prompt.txt")
406
403
  engine = XGATaskEngine(tool_box=tool_box,
@@ -3,6 +3,7 @@ from pydantic import Field
3
3
 
4
4
  from mcp.server.fastmcp import FastMCP
5
5
 
6
+ from xgae.engine.engine_base import XGAToolResult
6
7
 
7
8
  mcp = FastMCP(name="XGAE Message Tools")
8
9
 
@@ -16,7 +17,7 @@ async def complete(task_id: str,
16
17
  description="Comma-separated list of final outputs. Use when: 1) Completion relates to files 2) User needs to review outputs 3) Deliverables in files")]
17
18
  ):
18
19
  print(f"<XGAETools-complete>: task_id={task_id}, text={text}, attachments={attachments}")
19
- return {"status": "complete"}
20
+ return XGAToolResult(success=True, output=str({"status": "complete"}))
20
21
 
21
22
 
22
23
  @mcp.tool(
@@ -29,7 +30,7 @@ async def ask(task_id: str,
29
30
  description="Comma-separated list of files/URLs to attach. Use when: 1) Question relates to files/configs 2) User needs to review content 3) Options documented in files 4) Supporting evidence needed")]
30
31
  ):
31
32
  print(f"<XGAETools-ask>: task_id={task_id}, text={text}, attachments={attachments}")
32
- return {"status": "Awaiting user response..."}
33
+ return XGAToolResult(success=True, output=str({"status": "Awaiting user response..."}))
33
34
 
34
35
  @mcp.tool(
35
36
  description="end task, destroy sandbox"
xgae/utils/__init__.py CHANGED
@@ -1,11 +1,22 @@
1
1
  import logging
2
2
 
3
- def handle_error(e: Exception) -> None:
3
+
4
+ def log_trace(exception: Exception, error: str=None) -> str:
5
+ import traceback
6
+
7
+ if error:
8
+ logging.error(f"{error} , error: {exception}")
9
+
10
+ trace_info = traceback.format_exc()
11
+ logging.error("Trace Details:\n%s", traceback.format_exc())
12
+
13
+ return trace_info
14
+
15
+
16
+ def get_trace() -> str:
4
17
  import traceback
5
18
 
6
- logging.error("An error occurred: %s", str(e))
7
- logging.error("Traceback details:\n%s", traceback.format_exc())
8
- raise (e) from e
19
+ return traceback.format_exc()
9
20
 
10
21
 
11
22
  def to_bool(value: any) -> bool:
@@ -13,3 +24,4 @@ def to_bool(value: any) -> bool:
13
24
  return False
14
25
 
15
26
  return str(value).lower() == "true"
27
+
xgae/utils/misc.py CHANGED
@@ -4,7 +4,6 @@ import sys
4
4
 
5
5
  from typing import Any, Dict
6
6
 
7
- from xgae.utils import handle_error
8
7
 
9
8
  def read_file(file_path: str) -> str:
10
9
  if not os.path.exists(file_path):
@@ -17,7 +16,7 @@ def read_file(file_path: str) -> str:
17
16
  return content
18
17
  except Exception as e:
19
18
  logging.error(f"Read file '{file_path}' failed")
20
- handle_error(e)
19
+ raise
21
20
 
22
21
  def format_file_with_args(file_content:str, args: Dict[str, Any])-> str:
23
22
  from io import StringIO
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: xgae
3
- Version: 0.1.12
3
+ Version: 0.1.14
4
4
  Summary: Extreme General Agent Engine
5
5
  Requires-Python: >=3.13
6
6
  Requires-Dist: colorlog==6.9.0
@@ -0,0 +1,21 @@
1
+ xgae/__init__.py,sha256=OEUd9y9AoGBd3xYerdTTpz9xl4NWkmXeq1a2eil7Qro,72
2
+ xgae/cli_app.py,sha256=vKuCIJw0gwXdtkT-QNCZKt2dE53thvTFwQr7nTgvaPY,3000
3
+ xgae/engine/engine_base.py,sha256=-QZqLRbQdwRUfbY4l3i7dFfMB-BL267a-wGZR9bMPLc,1662
4
+ xgae/engine/mcp_tool_box.py,sha256=ot1lpMw77e6VLe4UPCJw3TxcjIXBqzg_qphdmHmW2XQ,10414
5
+ xgae/engine/prompt_builder.py,sha256=X9bS7YIms6LYplCpNHeUmi74xFP5MwFXmXNqOt1Xz-Q,4356
6
+ xgae/engine/task_engine.py,sha256=ayv4DTwDArMRQ_CPBxt8tdk7E7JPB0Osb5jxlikMsxM,21052
7
+ xgae/engine/task_langfuse.py,sha256=b0aJ_Di-WDcYzi0TFCvcKWxkBz7PYP2jx3N52OptQMs,2349
8
+ xgae/engine/responser/non_stream_responser.py,sha256=_DkyBZ50N1Tm8K0W16MD5vgx2Eu_amt2LgfN_7Ej1Us,5542
9
+ xgae/engine/responser/responser_base.py,sha256=mgioPh1atCd5PueumyxAN7eB87SB7wOSB_mLTI-6lIg,25250
10
+ xgae/engine/responser/stream_responser.py,sha256=AVEs0pkLZNPzXcp5A1iFNjDb-aUy7_NWRqThCZqmIyQ,14852
11
+ xgae/tools/without_general_tools_app.py,sha256=FGMV6njcOKwwfitc0j_nUov0RC-eWlhO1IP8_KHz1tQ,3788
12
+ xgae/utils/__init__.py,sha256=ElaGS-zdeZeu6is41u3Ny7lkvhg7BDSK-jMNg9j6K5A,499
13
+ xgae/utils/json_helpers.py,sha256=ubp-dOCeROnZv7JHARRdmDIO5Npdwzrt8AWo3SMv0kI,4705
14
+ xgae/utils/llm_client.py,sha256=6e3kzx73QN6z2SYMQQFmrmODj2Rk-GPJYIxBcFZhMQE,14361
15
+ xgae/utils/misc.py,sha256=aMWOvJ9VW52q-L9Lkjl1hvXqLwpJAmyxA-Z8jzqFG0U,907
16
+ xgae/utils/setup_env.py,sha256=MqNG0c2QQBDFU1kI8frxr9kB5d08Mmi3QZ1OoorgIa0,2662
17
+ xgae/utils/xml_tool_parser.py,sha256=I9xAZC_ElwBY19PNUq-WLXe9FSIJMeAv2Xs-VlajI7Y,4782
18
+ xgae-0.1.14.dist-info/METADATA,sha256=WGVnO2sWvP3EM98tObwlafmkfIRHl2oSTUn5rBsZ3HM,310
19
+ xgae-0.1.14.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
20
+ xgae-0.1.14.dist-info/entry_points.txt,sha256=SWN01JNAncV0oApEvFzpH0wsXfnFlB1adCH4IrAJxGc,163
21
+ xgae-0.1.14.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  [console_scripts]
2
- custom_fault_tools = examples.tools.custom_fault_tools_app:main
2
+ example-fault-tools = examples.tools.custom_fault_tools_app:main
3
3
  xgae = xgae.cli_app:main
4
4
  xgae-tools = xgae.tools.without_general_tools_app:main
@@ -1,21 +0,0 @@
1
- xgae/__init__.py,sha256=OEUd9y9AoGBd3xYerdTTpz9xl4NWkmXeq1a2eil7Qro,72
2
- xgae/cli_app.py,sha256=vKuCIJw0gwXdtkT-QNCZKt2dE53thvTFwQr7nTgvaPY,3000
3
- xgae/engine/engine_base.py,sha256=-QZqLRbQdwRUfbY4l3i7dFfMB-BL267a-wGZR9bMPLc,1662
4
- xgae/engine/mcp_tool_box.py,sha256=Vyku8uOsTZ4ElnwEEgaw0hiltTOlC-FvtC9Ox-iJYck,10089
5
- xgae/engine/prompt_builder.py,sha256=X9bS7YIms6LYplCpNHeUmi74xFP5MwFXmXNqOt1Xz-Q,4356
6
- xgae/engine/task_engine.py,sha256=J4hVAkRgmd24larSFjxoCaVh9r3fDZJ95fUGB-FICZ8,21174
7
- xgae/engine/task_langfuse.py,sha256=b0aJ_Di-WDcYzi0TFCvcKWxkBz7PYP2jx3N52OptQMs,2349
8
- xgae/engine/responser/non_stream_responser.py,sha256=za1-7b37jVcg1cgTadNNXljqUTbzA92168i80xV7zdw,5589
9
- xgae/engine/responser/responser_base.py,sha256=WA2oKqP-UhQZj2es2nIFKf6_XkOhIfqZMUcQzDhtc6Q,30424
10
- xgae/engine/responser/stream_responser.py,sha256=dXcj-l3jb8J0orZ7THdf0sOjw9M7aZbfjHQC0NwQizo,7868
11
- xgae/tools/without_general_tools_app.py,sha256=cza3aLVh-090QABYA_DakoXmlFmc9rxwrXQsQwveT9A,3655
12
- xgae/utils/__init__.py,sha256=6lZCuEzMj66SW5sXyWrIuLH2W-1mHpbLpIBq_qbnsiw,337
13
- xgae/utils/json_helpers.py,sha256=ubp-dOCeROnZv7JHARRdmDIO5Npdwzrt8AWo3SMv0kI,4705
14
- xgae/utils/llm_client.py,sha256=6e3kzx73QN6z2SYMQQFmrmODj2Rk-GPJYIxBcFZhMQE,14361
15
- xgae/utils/misc.py,sha256=M8lMXYp1pHiY6Ee8ZTUG88GpOAsE5fbYoRO_hcBFUCE,953
16
- xgae/utils/setup_env.py,sha256=MqNG0c2QQBDFU1kI8frxr9kB5d08Mmi3QZ1OoorgIa0,2662
17
- xgae/utils/xml_tool_parser.py,sha256=I9xAZC_ElwBY19PNUq-WLXe9FSIJMeAv2Xs-VlajI7Y,4782
18
- xgae-0.1.12.dist-info/METADATA,sha256=7bOOMvn3Ntj_v_tZy2hkfiNKyTABLhoZRKDb0CJkwpM,310
19
- xgae-0.1.12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
20
- xgae-0.1.12.dist-info/entry_points.txt,sha256=vClvL_WBJyF2x3wJCz5CNJ_BJG-dWUh7h2YbAoskHsc,162
21
- xgae-0.1.12.dist-info/RECORD,,
File without changes