xgae 0.1.13__py3-none-any.whl → 0.1.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xgae/engine/mcp_tool_box.py +1 -1
- xgae/engine/responser/non_stream_responser.py +1 -2
- xgae/engine/responser/responser_base.py +144 -216
- xgae/engine/responser/stream_responser.py +146 -33
- xgae/engine/task_engine.py +3 -2
- {xgae-0.1.13.dist-info → xgae-0.1.14.dist-info}/METADATA +1 -1
- {xgae-0.1.13.dist-info → xgae-0.1.14.dist-info}/RECORD +9 -9
- {xgae-0.1.13.dist-info → xgae-0.1.14.dist-info}/entry_points.txt +1 -1
- {xgae-0.1.13.dist-info → xgae-0.1.14.dist-info}/WHEEL +0 -0
xgae/engine/mcp_tool_box.py
CHANGED
@@ -200,7 +200,7 @@ if __name__ == "__main__":
     setup_logging()

     async def main():
-        ## Before Run Exec: uv run
+        ## Before Run Exec: uv run example-fault-tools
         mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
         #mcp_tool_box = XGAMcpToolBox()

xgae/engine/responser/non_stream_responser.py
CHANGED
@@ -61,8 +61,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
             parsing_details = parsed_xml_item['parsing_details']
             assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None

-            tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details)
-            tool_context.result = tool_result
+            tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details, tool_result)

             tool_start_msg = self._add_tool_start_message(tool_context)
             yield format_for_yield(tool_start_msg)
xgae/engine/responser/responser_base.py
CHANGED

@@ -29,6 +29,7 @@ class TaskResponserContext(TypedDict, total=False):
     max_xml_tool_calls: int  # LLM generate max_xml_tool limit, 0 is no limit
     use_assistant_chunk_msg: bool
     tool_execution_strategy: ToolExecutionStrategy
+    tool_execute_on_stream: bool
     xml_adding_strategy: XmlAddingStrategy
     add_response_msg_func: Callable
     create_response_msg_func: Callable
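The new tool_execute_on_stream key slots into a TypedDict declared with total=False, so every field stays optional: callers built against 0.1.13 that never set the flag still type-check, and readers fall back to a default. A minimal standalone sketch of that pattern (not xgae's actual class):

from typing import TypedDict

class ResponserContext(TypedDict, total=False):
    tool_execution_strategy: str
    tool_execute_on_stream: bool   # new optional key

ctx: ResponserContext = {"tool_execution_strategy": "parallel"}  # flag omitted
# Reader side: a missing key falls back to a safe default.
print(ctx.get("tool_execute_on_stream", False))  # False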
@@ -48,8 +49,8 @@ class ToolExecutionContext:
     """Context for a tool execution including call details, result, and display info."""
     tool_call: Dict[str, Any]
     tool_index: int
-    result: Optional[XGAToolResult] = None
     function_name: Optional[str] = None
+    result: Optional[XGAToolResult] = None
     xml_tag_name: Optional[str] = None
     error: Optional[Exception] = None
     assistant_message_id: Optional[str] = None
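Moving result below function_name is safe because both fields default to None; @dataclass only requires that defaulted fields follow the non-defaulted ones (tool_call, tool_index). Together with the _create_tool_context change later in this file, the context is now built in one constructor call instead of being mutated afterwards. A rough standalone sketch of the pattern, with stand-in types:

from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class ToolResult:   # stand-in for XGAToolResult
    success: bool
    output: str

@dataclass
class ToolContext:  # stand-in for ToolExecutionContext
    tool_call: Dict[str, Any]
    tool_index: int
    function_name: Optional[str] = None
    result: Optional[ToolResult] = None

def create_tool_context(tool_call: Dict[str, Any], tool_index: int,
                        result: Optional[ToolResult] = None) -> ToolContext:
    # A single constructor call replaces the old create-then-mutate sequence.
    return ToolContext(tool_call=tool_call, tool_index=tool_index,
                       function_name=tool_call.get("function_name"), result=result)

ctx = create_tool_context({"function_name": "ask"}, 0, ToolResult(True, "ok"))
print(ctx.function_name, ctx.result.success)  # ask True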
@@ -66,6 +67,7 @@ class TaskResponseProcessor(ABC):
         self.tool_execution_strategy = self.response_context.get("tool_execution_strategy", "parallel")
         self.xml_adding_strategy = self.response_context.get("xml_adding_strategy", "user_message")
         self.max_xml_tool_calls = self.response_context.get("max_xml_tool_calls", 0)
+        self.tool_execute_on_stream = response_context.get("tool_execute_on_stream", False)

         task_langfuse = response_context.get("task_langfuse")
         self.root_span = task_langfuse.root_span
@@ -291,11 +293,10 @@ class TaskResponseProcessor(ABC):

             return XGAToolResult(success=False, output=f"Executing tool {function_name}, error: {str(e)}")

+
     async def _execute_tools(self, tool_calls: List[Dict[str, Any]],
                              execution_strategy: ToolExecutionStrategy = "sequential"
                              ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
-        logging.info(f"TaskProcessor execute_tools: Executing {len(tool_calls)} tools with strategy '{execution_strategy}'")
-
         if execution_strategy == "sequential":
             return await self._execute_tools_sequentially(tool_calls)
         elif execution_strategy == "parallel":
@@ -304,271 +305,202 @@ class TaskResponseProcessor(ABC):
             logging.warning(f"TaskProcessor execute_tools: Unknown execution strategy '{execution_strategy}', use sequential")
             return await self._execute_tools_sequentially(tool_calls)

-
+
     async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
         """Execute tool calls sequentially and return results.

         This method executes tool calls one after another, waiting for each tool to complete
         before starting the next one. This is useful when tools have dependencies on each other.

-        Args:
-            tool_calls: List of tool calls to execute
-
         Returns:
             List of tuples containing the original tool call and its result
         """
         if not tool_calls:
+            logging.warning("TaskProcessor execute_tools_sequentially: tool_calls is empty")
             return []

         tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
-
-
-
+        tool_num = len(tool_calls)
+        if tool_num > 1:
+            logging.info(f"TaskProcessor execute_tools_sequentially: Executing {tool_num} tools sequentially: {tool_names}")
+            self.root_span.event(name="task_process_execute_tools_sequentially", level="DEFAULT",
+                                 status_message=f"Executing {len(tool_calls)} tools sequentially: {tool_names}")

         results = []
         for index, tool_call in enumerate(tool_calls):
             tool_name = tool_call.get('function_name', 'unknown')
-            logging.
-            … (13 removed lines not shown)
-            except Exception as e:
-                logging.error(f"Error executing tool {tool_name}: {str(e)}")
-                self.root_span.event(name="task_process_error_executing_tool", level="ERROR",
-                                     status_message=(f"Error executing tool {tool_name}: {str(e)}"))
-                error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
-                results.append((tool_call, error_result))
-
-        logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
-        # self.root_span.event(name="sequential_execution_completed", level="DEFAULT",
-        #                      status_message=(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
+            logging.info(f"TaskProcessor execute_tools_sequentially: Executing tool '{tool_name}', sequence={index + 1}/{tool_num}")
+            result = await self._execute_tool(tool_call)
+            results.append((tool_call, result))
+
+            # Check if this is a terminating tool (ask or complete)
+            if tool_name in ['ask', 'complete']:
+                if len(results) < tool_num:
+                    logging.info(f"TaskProcessor execute_tools_sequentially: Terminating tool '{tool_name}' executed, Stopping further tool execution.")
+                    self.root_span.event(name="task_process_terminate_tool_executed", level="DEFAULT",
+                                         status_message=f"Terminating tool '{tool_name}' executed, Stopping further tool execution.")
+                break
+
+        logging.info(f"TaskProcessor execute_tools_sequentially: Execution completed for {len(results)} tools, total {tool_num} tools)")
         return results


     async def _execute_tools_in_parallel(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
+        """Execute tool calls in parallel and return results.
+
+        This method executes all tool calls simultaneously using asyncio.gather, which
+        can significantly improve performance when executing multiple independent tools.
+
+        Returns:
+            List of tuples containing the original tool call and its result
+        """
         if not tool_calls:
+            logging.warning("TaskProcessor execute_tools_in_parallel: tool_calls is empty")
             return []

-        … (6 removed lines not shown)
-        # Create tasks for all tool calls
-        tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
-
-        # Execute all tasks concurrently with error handling
-        results = await asyncio.gather(*tasks, return_exceptions=True)
-
-        # Process results and handle any exceptions
-        processed_results = []
-        for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
-            if isinstance(result, Exception):
-                logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
-                self.root_span.event(name="task_process_error_executing_tool", level="ERROR", status_message=(
-                    f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
-                # Create error result
-                error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
-                processed_results.append((tool_call, error_result))
-            else:
-                processed_results.append((tool_call, result))
-
-        logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
-        # self.root_span.event(name="parallel_execution_completed", level="DEFAULT",
-        #                      status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
-        return processed_results
+        tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
+        tool_num = len(tool_calls)
+        if tool_num > 1:
+            logging.info(f"TaskProcessor execute_tools_in_parallel: Executing {tool_num} tools sequentially: {tool_names}")
+            self.root_span.event(name="task_process_execute_tools_parallel", level="DEFAULT",
+                                 status_message=f"Executing {len(tool_calls)} tools parallelly: {tool_names}")

-        … (3 removed lines not shown)
-        status_message=(f"Error in parallel tool execution: {str(e)}"))
-        # Return error results for all tools if the gather itself fails
-        return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
-                for tool_call in tool_calls]
-
-    def _add_tool_messsage(
-            self,
-            tool_call: Dict[str, Any],
-            result: XGAToolResult,
-            strategy: XmlAddingStrategy = "assistant_message",
-            assistant_message_id: Optional[str] = None,
-            parsing_details: Optional[Dict[str, Any]] = None
-    ) -> Optional[Dict[str, Any]]:  # Return the full message object
-        try:
-            message_obj = None  # Initialize message_obj
-
-            # Create metadata with assistant_message_id if provided
-            metadata = {}
-            if assistant_message_id:
-                metadata["assistant_message_id"] = assistant_message_id
-                logging.info(f"Linking tool result to assistant message: {assistant_message_id}")
-
-            # --- Add parsing details to metadata if available ---
-            if parsing_details:
-                metadata["parsing_details"] = parsing_details
-                logging.info("Adding parsing_details to tool result metadata")
-
-            # For XML and other non-native tools, use the new structured format
-            # Determine message role based on strategy
-            result_role = "user" if strategy == "user_message" else "assistant"
-
-            # Create two versions of the structured result
-            # 1. Rich version for the frontend
-            structured_result_for_frontend = self._create_structured_tool_result(tool_call, result, parsing_details,
-                                                                                 for_llm=False)
-            # 2. Concise version for the LLM
-            structured_result_for_llm = self._create_structured_tool_result(tool_call, result, parsing_details,
-                                                                            for_llm=True)
-
-            # Add the message with the appropriate role to the conversation history
-            # This allows the LLM to see the tool result in subsequent interactions
-            result_message_for_llm = {
-                "role": result_role,
-                "content": json.dumps(structured_result_for_llm)
-            }
+        # Execute all tasks concurrently with error handling
+        tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
+        results = await asyncio.gather(*tasks, return_exceptions=True)

-        … (53 removed lines not shown)
+        processed_results = []
+        for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
+            processed_results.append((tool_call, result))
+
+        logging.info(f"TaskProcessor execute_tools_in_parallel: Execution completed for {len(results)} tools, total {tool_num} tools)")
+        return processed_results
+
+
+    def _add_tool_messsage(self,
+                           tool_call: Dict[str, Any],
+                           result: XGAToolResult,
+                           strategy: XmlAddingStrategy = "assistant_message",
+                           assistant_message_id: Optional[str] = None,
+                           parsing_details: Optional[Dict[str, Any]] = None
+                           ) -> Optional[Dict[str, Any]]:  # Return the full message object
+        tool_message = None
+
+        metadata = {}
+        if assistant_message_id:
+            metadata["assistant_message_id"] = assistant_message_id
+
+        if parsing_details:
+            metadata["parsing_details"] = parsing_details
+
+        role = "user" if strategy == "user_message" else "assistant"
+
+        # Create two versions of the structured result
+        # Rich version for the frontend
+        result_for_frontend = self._create_structured_tool_result(tool_call, result, parsing_details, for_llm=False)
+        # Concise version for the LLM
+        result_for_llm = self._create_structured_tool_result(tool_call, result, parsing_details, for_llm=True)
+
+        # Add the message with the appropriate role to the conversation history
+        # This allows the LLM to see the tool result in subsequent interactions
+        content = {
+            "role": role,
+            "content": json.dumps(result_for_llm)
+        }
+
+        metadata['frontend_content'] = result_for_frontend
+
+        tool_message = self.add_response_message(
+            type="tool",
+            content=content,
+            is_llm_message=True,
+            metadata=metadata
+        )
+
+        # Let's reconstruct the message for yielding.
+        yield_message = tool_message.copy()
+        yield_message['content'] = result_for_frontend
+
+        return yield_message
+
+
+    def _create_structured_tool_result(self,
+                                       tool_call: Dict[str, Any],
+                                       result: XGAToolResult,
+                                       parsing_details: Optional[Dict[str, Any]] = None,
+                                       for_llm: bool = False) -> Dict[str, Any]:
         function_name = tool_call.get("function_name", "unknown")
         xml_tag_name = tool_call.get("xml_tag_name")
         arguments = tool_call.get("arguments", {})
         tool_call_id = tool_call.get("id")

         # Process the output - if it's a JSON string, parse it back to an object
-        output = result.output
-        … (3 removed lines not shown)
-        parsed_output = safe_json_parse(output)
-        # If parsing succeeded and we got a dict/list, use the parsed version
-        if isinstance(parsed_output, (dict, list)):
-            output = parsed_output
-        # Otherwise keep the original string
-        except Exception:
-            # If parsing fails, keep the original string
-            pass
+        output = result.output
+        parsed_output = safe_json_parse(output)
+        if isinstance(parsed_output, (dict, list)):
+            output = parsed_output

         output_to_use = output
         # If this is for the LLM and it's an edit_file tool, create a concise output
         if for_llm and function_name == 'edit_file' and isinstance(output, dict):
             # The frontend needs original_content and updated_content to render diffs.
             # The concise version for the LLM was causing issues.
-            # We will now pass the full output, and rely on the ContextManager to truncate if needed.
+            # @todo We will now pass the full output, and rely on the ContextManager to truncate if needed.
             output_to_use = output

-
-        structured_result_v1 = {
+        structured_result = {
            "tool_execution": {
                 "function_name": function_name,
                 "xml_tag_name": xml_tag_name,
                 "tool_call_id": tool_call_id,
                 "arguments": arguments,
                 "result": {
-                    "success": result.success
-                    "output": output_to_use,
-                    "error":
+                    "success": result.success,
+                    "output": output_to_use,
+                    "error": None if result.success else result.output
                 },
             }
         }

-        return
+        return structured_result

-
+
+    def _create_tool_context(self,
+                             tool_call: Dict[str, Any],
+                             tool_index: int,
                              assistant_message_id: Optional[str] = None,
-                             parsing_details: Optional[Dict[str, Any]] = None
+                             parsing_details: Optional[Dict[str, Any]] = None,
+                             result: Optional[XGAToolResult] = None,
+                             ) -> ToolExecutionContext:
         """Create a tool execution context with display name and parsing details populated."""
-
+        return ToolExecutionContext(
+            function_name=tool_call.get("function_name"),
             tool_call=tool_call,
             tool_index=tool_index,
             assistant_message_id=assistant_message_id,
-            parsing_details=parsing_details
+            parsing_details=parsing_details,
+            xml_tag_name=tool_call.get("xml_tag_name"),
+            result=result,
         )

-        # Set function_name and xml_tag_name fields
-        context.xml_tag_name = tool_call["xml_tag_name"]
-        context.function_name = tool_call["function_name"]
-
-        return context

     def _add_tool_start_message(self, context: ToolExecutionContext) -> Optional[Dict[str, Any]]:
         """Formats, saves, and returns a tool started status message."""
         tool_name = context.xml_tag_name or context.function_name
         content = {
-            "
-            "
-            "
+            "status_type": "tool_started",
+            "role": "assistant",
+            "function_name": context.function_name,
+            "xml_tag_name": context.xml_tag_name,
+            "message": f"Starting execution of {tool_name}",
+            "tool_index": context.tool_index
         }

-        return self.add_response_message(
-            type="status", content=content, is_llm_message=False
-        )
+        return self.add_response_message(type="status", content=content, is_llm_message=False)

     def _add_tool_completed_message(self, context: ToolExecutionContext, tool_message_id: Optional[str]) -> Optional[Dict[str, Any]]:
         """Formats, saves, and returns a tool completed/failed status message."""
         if not context.result:
-            # Delegate to error saving if result is missing (e.g., execution failed)
             return self._add_tool_error_message(context)

         tool_name = context.xml_tag_name or context.function_name
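The refactor above leaves two execution strategies: the sequential path now stops as soon as a terminating tool ('ask' or 'complete') has run, while the parallel path still launches everything at once via asyncio.gather with return_exceptions=True. A compressed, runnable approximation with dummy tools (nothing below is xgae's actual API):

import asyncio
from typing import Any, Dict, List, Tuple

async def execute_tool(call: Dict[str, Any]) -> str:
    await asyncio.sleep(0.01)  # pretend to do work
    return f"ran {call['function_name']}"

async def run_sequential(calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], Any]]:
    results = []
    for call in calls:
        results.append((call, await execute_tool(call)))
        if call["function_name"] in ("ask", "complete"):
            break  # terminating tool: skip the remaining calls, as 0.1.14 does
    return results

async def run_parallel(calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], Any]]:
    # return_exceptions=True keeps one failing tool from cancelling the rest
    results = await asyncio.gather(*(execute_tool(c) for c in calls), return_exceptions=True)
    return list(zip(calls, results))

calls = [{"function_name": n} for n in ("search", "ask", "report")]
print(asyncio.run(run_sequential(calls)))  # stops after 'ask'
print(asyncio.run(run_parallel(calls)))    # all three run

One trade-off is visible in the diff itself: the old parallel path converted exceptions into XGAToolResult error objects, whereas the new one appends gather's results, exceptions included, unchanged.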
@@ -576,42 +508,38 @@ class TaskResponseProcessor(ABC):
         message_text = f"Tool {tool_name} {'completed successfully' if context.result.success else 'failed'}"

         content = {
-            "
-            "
-            "
+            "status_type": status_type,
+            "role": "assistant",
+            "function_name": context.function_name,
+            "xml_tag_name": context.xml_tag_name,
+            "message": message_text,
+            "tool_index": context.tool_index,
             "tool_call_id": context.tool_call.get("id")
         }
+
         metadata = {}
         # Add the *actual* tool result message ID to the metadata if available and successful
         if context.result.success and tool_message_id:
             metadata["linked_tool_result_message_id"] = tool_message_id

-        # <<< ADDED: Signal if this is a terminating tool >>>
         if context.function_name in ['ask', 'complete']:
             metadata["agent_should_terminate"] = "true"
-            logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
-            # self.root_span.event(name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
-            #     f"Marking tool status for '{context.function_name}' with termination signal."))
-        # <<< END ADDED >>>

-        return self.add_response_message(
-            type="status", content=content, is_llm_message=False, metadata=metadata
-        )
+        return self.add_response_message(type="status", content=content, is_llm_message=False, metadata=metadata)

     def _add_tool_error_message(self, context: ToolExecutionContext) -> Optional[Dict[str, Any]]:
         """Formats, saves, and returns a tool error status message."""
         error_msg = str(context.error) if context.error else "Unknown error during tool execution"
         tool_name = context.xml_tag_name or context.function_name
         content = {
-            "
-            "
+            "status_type": "tool_error",
+            "role": "assistant",
+            "function_name": context.function_name,
+            "xml_tag_name": context.xml_tag_name,
             "message": f"Error executing tool {tool_name}: {error_msg}",
             "tool_index": context.tool_index,
             "tool_call_id": context.tool_call.get("id")
         }

-
-        return self.add_response_message(
-            type="status", content=content, is_llm_message=False
-        )
+        return self.add_response_message(type="status", content=content, is_llm_message=False)

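After this refactor every tool result flows through one tool_execution envelope, serialized with json.dumps for the LLM and attached raw to metadata["frontend_content"] for the UI. Roughly the shape _create_structured_tool_result produces, with invented example values:

import json

success, output = True, {"stdout": "42"}
structured_result = {
    "tool_execution": {
        "function_name": "run_shell",   # illustrative values only
        "xml_tag_name": "run-shell",
        "tool_call_id": "call_1",
        "arguments": {"cmd": "echo 42"},
        "result": {
            "success": success,
            "output": output,
            # per the fixed line above: error carries the output only on failure
            "error": None if success else output,
        },
    }
}
print(json.dumps(structured_result, indent=2))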
xgae/engine/responser/stream_responser.py
CHANGED

@@ -1,5 +1,5 @@
 import logging
-
+import asyncio
 from typing import List, Dict, Any, Optional, AsyncGenerator, override

 from xgae.utils import log_trace
@@ -21,14 +21,21 @@ class StreamTaskResponser(TaskResponseProcessor):
         auto_continue_count = continuous_state.get('auto_continue_count', 0)
         can_auto_continue = continuous_state.get("auto_continue", False)
         use_assistant_chunk_msg = self.response_context.get("use_assistant_chunk_msg")
-
+
         finish_reason = None
         should_auto_continue = False
         sequence = continuous_state.get('assistant_msg_sequence', 0)
-
+
+        pending_tool_executions = []
+        yielded_tool_indices = set()  # Track which tool statuses have been yielded
+        tool_results_buffer = []  # Store (tool_call, result, tool_index, context)
+        tool_index = 0
+        current_xml_content = accumulated_content  # Track XML content for streaming detection
+
+        logging.info(f"=== StreamResp:tool_execute_on_stream={self.tool_execute_on_stream}, auto_continue_count={auto_continue_count}, accumulated_content_len={len(accumulated_content)}")
         try:
             async for llm_chunk in llm_response:
-                if hasattr(llm_chunk, 'choices') and llm_chunk.choices and hasattr(llm_chunk.choices[0],
+                if hasattr(llm_chunk, 'choices') and llm_chunk.choices and hasattr(llm_chunk.choices[0], 'finish_reason'):
                     if llm_chunk.choices[0].finish_reason:
                         finish_reason = llm_chunk.choices[0].finish_reason
                         logging.info(f"StreamResp:LLM chunk response finish_reason={finish_reason}")
@@ -39,24 +46,71 @@ class StreamTaskResponser(TaskResponseProcessor):
                 if llm_chunk_msg and hasattr(llm_chunk_msg, 'content') and llm_chunk_msg.content:
                     chunk_content = llm_chunk_msg.content
                     accumulated_content += chunk_content
+                    current_xml_content += chunk_content  # Track streaming XML content

                     xml_tool_call_count = len(self._extract_xml_chunks(accumulated_content))
                     if self.max_xml_tool_calls <= 0 or xml_tool_call_count < self.max_xml_tool_calls:
                         if use_assistant_chunk_msg:
                             message_data = {"role": "assistant", "content": chunk_content}
                             metadata = {"sequence": sequence}
-                            assistant_chunk_msg = self.create_response_message(type="assistant_chunk",
-                                                                               is_llm_message=True,
+                            assistant_chunk_msg = self.create_response_message(type="assistant_chunk", content=message_data,
+                                                                               is_llm_message=True, metadata=metadata)
                             yield assistant_chunk_msg

+                        # Process XML tool calls during streaming
+                        if self.tool_execute_on_stream:
+                            xml_chunks = self._extract_xml_chunks(current_xml_content)
+                            for xml_chunk in xml_chunks:
+                                current_xml_content = current_xml_content.replace(xml_chunk, "", 1)
+                                result = self._parse_xml_tool_call(xml_chunk)
+                                if result:
+                                    tool_call, parsing_details = result
+
+                                    # Create tool context for streaming execution
+                                    tool_context = self._create_tool_context(tool_call, tool_index, None, parsing_details)
+
+                                    # Yield tool start status immediately
+                                    tool_start_msg = self._add_tool_start_message(tool_context)
+                                    if tool_start_msg:
+                                        yield format_for_yield(tool_start_msg)
+                                    yielded_tool_indices.add(tool_index)
+
+                                    # Create async execution task
+                                    execution_task = asyncio.create_task(self._execute_tool(tool_call))
+                                    pending_tool_executions.append({"task": execution_task, "tool_call": tool_call, "tool_index": tool_index,
+                                                                    "context": tool_context, "parsing_details": parsing_details})
+                                    tool_index += 1
+
                         sequence += 1
                     else:
                         finish_reason = "xml_tool_limit_reached"
                         break

-            if
+            if len(accumulated_content) == 0:
                 logging.warning(f"StreamResp: LLM response_message content is empty")

+            # Wait for pending tool executions from streaming phase
+            if pending_tool_executions:
+                logging.info(f"Waiting for {len(pending_tool_executions)} pending streamed tool executions")
+
+                pending_tasks = [execution["task"] for execution in pending_tool_executions]
+                done, _ = await asyncio.wait(pending_tasks)
+
+                for execution in pending_tool_executions:
+                    tool_idx = execution.get("tool_index", -1)
+                    context = execution["context"]
+
+                    try:
+                        if execution["task"].done():
+                            result = execution["task"].result()
+                            context.result = result
+                            tool_results_buffer.append((execution["tool_call"], result, tool_idx, context))
+                        else:
+                            logging.warning(f"Task for tool index {tool_idx} not done after wait.")
+                    except Exception as e:
+                        logging.error(f"Error getting result for pending tool execution {tool_idx}: {str(e)}")
+                        context.error = e
+
             if finish_reason == "xml_tool_limit_reached":
                 xml_chunks = self._extract_xml_chunks(accumulated_content)
                 if len(xml_chunks) > self.max_xml_tool_calls:
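This is the heart of the new tool_execute_on_stream path: complete XML tool calls are peeled out of the accumulating text as chunks arrive, each is scheduled immediately with asyncio.create_task, and the tasks are collected after the stream ends with asyncio.wait. A self-contained toy version (the chunk source and <tool> syntax are stand-ins for xgae's XML parser):

import asyncio
import re

async def execute_tool(name: str) -> str:
    await asyncio.sleep(0.01)
    return f"{name}: done"

async def stream_chunks():
    # stand-in for the LLM stream; a tool call may span chunk boundaries
    for chunk in ("Hello <tool>a</to", "ol> world <tool>b</tool>"):
        yield chunk

async def main():
    buffer, pending = "", []
    async for chunk in stream_chunks():
        buffer += chunk
        for call in re.findall(r"<tool>(.*?)</tool>", buffer):
            buffer = buffer.replace(f"<tool>{call}</tool>", "", 1)
            # execution starts while the stream is still being consumed
            pending.append(asyncio.create_task(execute_tool(call)))
    if pending:
        done, _ = await asyncio.wait(pending)
        for task in done:
            print(task.result())

asyncio.run(main())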
@@ -65,51 +119,109 @@ class StreamTaskResponser(TaskResponseProcessor):
                     last_chunk = limited_chunks[-1]
                     last_chunk_pos = accumulated_content.find(last_chunk) + len(last_chunk)
                     accumulated_content = accumulated_content[:last_chunk_pos]
-            parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)

+            parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)
             should_auto_continue = (can_auto_continue and finish_reason == 'length')

-            self.root_span.event(name=f"stream_processor_start[{self.task_no}]({auto_continue_count})",level="DEFAULT",
+            self.root_span.event(name=f"stream_processor_start[{self.task_no}]({auto_continue_count})", level="DEFAULT",
                                  status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}, "
                                                 f"parsed_xml_data_len={len(parsed_xml_data)}, accumulated_content={len(accumulated_content)}, "
-                                                f"should_auto_continue={should_auto_continue}")
+                                                f"should_auto_continue={should_auto_continue}, pending_executions={len(pending_tool_executions)}")

             assistant_msg = None
             if accumulated_content and not should_auto_continue:
                 message_data = {"role": "assistant", "content": accumulated_content}
-                assistant_msg = self.add_response_message(type="assistant", content=message_data,
-                                                          is_llm_message=True)
+                assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
                 yield assistant_msg

+            # Process results from both streaming and non-streaming executions
             tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
-            if len(tool_calls_to_execute) > 0:
-                tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)

-            … (5 removed lines not shown)
-            assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
+            # Update assistant_message_id for streaming tool contexts
+            assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
+            for execution in pending_tool_executions:
+                if not execution["context"].assistant_message_id:
+                    execution["context"].assistant_message_id = assistant_msg_id

-
-
+            if len(tool_calls_to_execute) > 0:
+                if self.tool_execute_on_stream:
+                    # Handle results from streaming executions + any remaining tools
+                    remaining_tools = []
+                    streamed_tool_indices = set()
+
+                    # Identify which tools were already executed during streaming by index
+                    for execution in pending_tool_executions:
+                        streamed_tool_indices.add(execution["tool_index"])
+
+                    # Find remaining tools that weren't executed during streaming
+                    for i, parsed_item in enumerate(parsed_xml_data):
+                        tool_call = parsed_item['tool_call']
+                        tool_identifier = (tool_call.get('function_name', ''), str(tool_call.get('arguments', {})))
+
+                        # Check if this tool was already executed during streaming
+                        already_executed = False
+                        for execution in pending_tool_executions:
+                            exec_tool_call = execution["tool_call"]
+                            exec_identifier = (exec_tool_call.get('function_name', ''), str(exec_tool_call.get('arguments', {})))
+                            if tool_identifier == exec_identifier:
+                                already_executed = True
+                                break
+
+                        if not already_executed:
+                            remaining_tools.append((parsed_item['tool_call'], parsed_item['parsing_details'], tool_index))
+                            tool_index += 1
+
+                    # Execute remaining tools if any
+                    if remaining_tools:
+                        for tool_call, parsing_details, t_idx in remaining_tools:
+                            tool_context = self._create_tool_context(tool_call, t_idx, assistant_msg_id, parsing_details)
+
+                            tool_start_msg = self._add_tool_start_message(tool_context)
+                            yield format_for_yield(tool_start_msg)
+
+                            result = await self._execute_tool(tool_call)
+                            tool_context.result = result
+                            tool_results_buffer.append((tool_call, result, t_idx, tool_context))
+
+                    # Process all tool results
+                    for tool_call, result, t_idx, context in tool_results_buffer:
+                        tool_message = self._add_tool_messsage(tool_call, result, self.xml_adding_strategy, assistant_msg_id,
+                                                               getattr(context, 'parsing_details', None))
+
+                        tool_completed_msg = self._add_tool_completed_message(context, tool_message['message_id'] if tool_message else None)
+                        yield format_for_yield(tool_completed_msg)
+
+                        if tool_message:
+                            yield format_for_yield(tool_message)
+
+                        if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
+                            finish_reason = "completed"
+                            break
+                else:  # non-streaming execution
+                    tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
+                    tool_index = 0
+                    for i, (returned_tool_call, tool_result) in enumerate(tool_results):
+                        parsed_xml_item = parsed_xml_data[i]
+                        tool_call = parsed_xml_item['tool_call']
+                        parsing_details = parsed_xml_item['parsing_details']

-
-                yield format_for_yield(tool_start_msg)
+                        tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details, tool_result)

-
+                        tool_start_msg = self._add_tool_start_message(tool_context)
+                        yield format_for_yield(tool_start_msg)

-
-                yield format_for_yield(tool_completed_msg)
+                        tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy, assistant_msg_id, parsing_details)

-
+                        tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
+                        yield format_for_yield(tool_completed_msg)

-
-
-
+                        yield format_for_yield(tool_message)
+
+                        if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
+                            finish_reason = "completed"
+                            break

-
+                        tool_index += 1
             else:
                 finish_reason = "non_tool_call"
                 logging.warning(f"StreamResp: tool_calls is empty, No Tool need to call !")
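Reconciling the two phases relies on value identity, not object identity: the XML is re-parsed once the stream finishes, so a streamed execution is matched to its parsed counterpart by the (function_name, str(arguments)) pair. A sketch of that comparison; one caveat is that two byte-identical calls would collapse into a single match:

def ident(call):
    # value-based identity: name plus stringified arguments
    return (call.get("function_name", ""), str(call.get("arguments", {})))

already_run = [{"function_name": "search", "arguments": {"q": "x"}}]
parsed = [{"function_name": "search", "arguments": {"q": "x"}},
          {"function_name": "report", "arguments": {}}]

executed = {ident(c) for c in already_run}
remaining = [c for c in parsed if ident(c) not in executed]
print(remaining)  # only the 'report' call still needs to run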
@@ -133,4 +245,5 @@ class StreamTaskResponser(TaskResponseProcessor):
             if should_auto_continue:
                 continuous_state['accumulated_content'] = accumulated_content
                 continuous_state['assistant_msg_sequence'] = sequence
-                logging.warning(
+                logging.warning(
+                    f"StreamResp: Updated continuous state for auto-continue with {len(accumulated_content)} chars")
xgae/engine/task_engine.py
CHANGED
@@ -344,7 +344,8 @@ class XGATaskEngine:
             "model_name": self.model_name,
             "max_xml_tool_calls": 0,
             "use_assistant_chunk_msg": self.use_assistant_chunk_msg,
-            "tool_execution_strategy": "parallel" if self.tool_exec_parallel else "sequential",
+            "tool_execution_strategy": "parallel" if self.tool_exec_parallel else "sequential",
+            "tool_execute_on_stream": False,
             "xml_adding_strategy": "user_message",
             "add_response_msg_func": self.add_response_message,
             "create_response_msg_func": self.create_response_message,
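The engine pins "tool_execute_on_stream": False, so 0.1.14 ships the streaming executor dark: the plumbing above exists, but this default keeps runtime behavior unchanged until the flag is flipped. An illustrative fragment of how such a context might be assembled (the function is hypothetical; only the keys mirror the diff):

def build_response_context(tool_exec_parallel: bool) -> dict:
    return {
        "max_xml_tool_calls": 0,
        "tool_execution_strategy": "parallel" if tool_exec_parallel else "sequential",
        "tool_execute_on_stream": False,  # new in 0.1.14, disabled by default
        "xml_adding_strategy": "user_message",
    }

print(build_response_context(tool_exec_parallel=True))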
@@ -396,7 +397,7 @@ if __name__ == "__main__":
     setup_logging()

     async def main():
-        # Before Run Exec: uv run
+        # Before Run Exec: uv run example-fault-tools
         tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
         system_prompt = read_file("templates/example/fault_user_prompt.txt")
         engine = XGATaskEngine(tool_box=tool_box,

{xgae-0.1.13.dist-info → xgae-0.1.14.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
 xgae/__init__.py,sha256=OEUd9y9AoGBd3xYerdTTpz9xl4NWkmXeq1a2eil7Qro,72
 xgae/cli_app.py,sha256=vKuCIJw0gwXdtkT-QNCZKt2dE53thvTFwQr7nTgvaPY,3000
 xgae/engine/engine_base.py,sha256=-QZqLRbQdwRUfbY4l3i7dFfMB-BL267a-wGZR9bMPLc,1662
-xgae/engine/mcp_tool_box.py,sha256=
+xgae/engine/mcp_tool_box.py,sha256=ot1lpMw77e6VLe4UPCJw3TxcjIXBqzg_qphdmHmW2XQ,10414
 xgae/engine/prompt_builder.py,sha256=X9bS7YIms6LYplCpNHeUmi74xFP5MwFXmXNqOt1Xz-Q,4356
-xgae/engine/task_engine.py,sha256=
+xgae/engine/task_engine.py,sha256=ayv4DTwDArMRQ_CPBxt8tdk7E7JPB0Osb5jxlikMsxM,21052
 xgae/engine/task_langfuse.py,sha256=b0aJ_Di-WDcYzi0TFCvcKWxkBz7PYP2jx3N52OptQMs,2349
-xgae/engine/responser/non_stream_responser.py,sha256=
-xgae/engine/responser/responser_base.py,sha256=
-xgae/engine/responser/stream_responser.py,sha256=
+xgae/engine/responser/non_stream_responser.py,sha256=_DkyBZ50N1Tm8K0W16MD5vgx2Eu_amt2LgfN_7Ej1Us,5542
+xgae/engine/responser/responser_base.py,sha256=mgioPh1atCd5PueumyxAN7eB87SB7wOSB_mLTI-6lIg,25250
+xgae/engine/responser/stream_responser.py,sha256=AVEs0pkLZNPzXcp5A1iFNjDb-aUy7_NWRqThCZqmIyQ,14852
 xgae/tools/without_general_tools_app.py,sha256=FGMV6njcOKwwfitc0j_nUov0RC-eWlhO1IP8_KHz1tQ,3788
 xgae/utils/__init__.py,sha256=ElaGS-zdeZeu6is41u3Ny7lkvhg7BDSK-jMNg9j6K5A,499
 xgae/utils/json_helpers.py,sha256=ubp-dOCeROnZv7JHARRdmDIO5Npdwzrt8AWo3SMv0kI,4705
@@ -15,7 +15,7 @@ xgae/utils/llm_client.py,sha256=6e3kzx73QN6z2SYMQQFmrmODj2Rk-GPJYIxBcFZhMQE,1436
 xgae/utils/misc.py,sha256=aMWOvJ9VW52q-L9Lkjl1hvXqLwpJAmyxA-Z8jzqFG0U,907
 xgae/utils/setup_env.py,sha256=MqNG0c2QQBDFU1kI8frxr9kB5d08Mmi3QZ1OoorgIa0,2662
 xgae/utils/xml_tool_parser.py,sha256=I9xAZC_ElwBY19PNUq-WLXe9FSIJMeAv2Xs-VlajI7Y,4782
-xgae-0.1.
-xgae-0.1.
-xgae-0.1.
-xgae-0.1.
+xgae-0.1.14.dist-info/METADATA,sha256=WGVnO2sWvP3EM98tObwlafmkfIRHl2oSTUn5rBsZ3HM,310
+xgae-0.1.14.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+xgae-0.1.14.dist-info/entry_points.txt,sha256=SWN01JNAncV0oApEvFzpH0wsXfnFlB1adCH4IrAJxGc,163
+xgae-0.1.14.dist-info/RECORD,,
{xgae-0.1.13.dist-info → xgae-0.1.14.dist-info}/WHEEL
File without changes