xgae 0.1.15__tar.gz → 0.1.17__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (43)
  1. {xgae-0.1.15 → xgae-0.1.17}/.env +2 -2
  2. {xgae-0.1.15 → xgae-0.1.17}/PKG-INFO +1 -1
  3. {xgae-0.1.15 → xgae-0.1.17}/pyproject.toml +1 -1
  4. xgae-0.1.17/release.md +34 -0
  5. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/cli_app.py +2 -2
  6. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/mcp_tool_box.py +6 -6
  7. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/prompt_builder.py +1 -2
  8. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/responser/non_stream_responser.py +5 -8
  9. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/responser/responser_base.py +21 -24
  10. xgae-0.1.17/src/xgae/engine/responser/stream_responser.py +137 -0
  11. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/task_engine.py +10 -11
  12. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/task_langfuse.py +1 -1
  13. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/tools/without_general_tools_app.py +2 -0
  14. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/utils/xml_tool_parser.py +2 -2
  15. {xgae-0.1.15 → xgae-0.1.17}/uv.lock +1 -1
  16. xgae-0.1.15/release.md +0 -12
  17. xgae-0.1.15/src/xgae/engine/responser/stream_responser.py +0 -256
  18. {xgae-0.1.15 → xgae-0.1.17}/.python-version +0 -0
  19. {xgae-0.1.15 → xgae-0.1.17}/README.md +0 -0
  20. {xgae-0.1.15 → xgae-0.1.17}/mcpservers/custom_servers.json +0 -0
  21. {xgae-0.1.15 → xgae-0.1.17}/mcpservers/xga_server.json +0 -0
  22. {xgae-0.1.15 → xgae-0.1.17}/mcpservers/xga_server_sse.json +0 -0
  23. {xgae-0.1.15 → xgae-0.1.17}/src/examples/agent/langgraph/react/react_agent.py +0 -0
  24. {xgae-0.1.15 → xgae-0.1.17}/src/examples/engine/run_general_tools.py +0 -0
  25. {xgae-0.1.15 → xgae-0.1.17}/src/examples/engine/run_human_in_loop.py +0 -0
  26. {xgae-0.1.15 → xgae-0.1.17}/src/examples/engine/run_simple.py +0 -0
  27. {xgae-0.1.15 → xgae-0.1.17}/src/examples/engine/run_user_prompt.py +0 -0
  28. {xgae-0.1.15 → xgae-0.1.17}/src/examples/tools/custom_fault_tools_app.py +0 -0
  29. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/__init__.py +0 -0
  30. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/engine_base.py +0 -0
  31. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/utils/__init__.py +0 -0
  32. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/utils/json_helpers.py +0 -0
  33. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/utils/llm_client.py +0 -0
  34. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/utils/misc.py +0 -0
  35. {xgae-0.1.15 → xgae-0.1.17}/src/xgae/utils/setup_env.py +0 -0
  36. {xgae-0.1.15 → xgae-0.1.17}/templates/custom_tool_prompt_template.txt +0 -0
  37. {xgae-0.1.15 → xgae-0.1.17}/templates/example/fault_user_prompt.txt +0 -0
  38. {xgae-0.1.15 → xgae-0.1.17}/templates/gemini_system_prompt_template.txt +0 -0
  39. {xgae-0.1.15 → xgae-0.1.17}/templates/general_tool_prompt_template.txt +0 -0
  40. {xgae-0.1.15 → xgae-0.1.17}/templates/system_prompt_response_sample.txt +0 -0
  41. {xgae-0.1.15 → xgae-0.1.17}/templates/system_prompt_template.txt +0 -0
  42. {xgae-0.1.15 → xgae-0.1.17}/test/test_langfuse.py +0 -0
  43. {xgae-0.1.15 → xgae-0.1.17}/test/test_litellm_langfuse.py +0 -0
{xgae-0.1.15 → xgae-0.1.17}/.env
@@ -1,5 +1,5 @@
  # LOG
- LOG_LEVEL=DEBUG
+ LOG_LEVEL=INFO
  LOG_FILE=log/xgae.log
  LOG_ENABLE=True

@@ -16,7 +16,7 @@ LLM_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
  LLM_API_KEY=
  LLM_MAX_TOKENS=16384
  LLM_TEMPERATURE=0
- LLM_MAX_RETRIES=2
+ LLM_MAX_RETRIES=1
  LLM_STREAM=True
  LLM_ENABLE_THINKING=False

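Note on the two .env changes above: 0.1.17 quiets default logging (DEBUG → INFO) and drops LLM_MAX_RETRIES from 2 to 1. A minimal sketch of how such settings are typically consumed; this assumes python-dotenv, since xgae's own setup_env.py loader is not shown in this diff:

    # Hypothetical consumer of the .env keys changed above (not xgae's actual loader).
    import logging
    import os

    from dotenv import load_dotenv  # assumes python-dotenv is installed

    load_dotenv()  # reads .env from the current working directory

    log_level = os.getenv("LOG_LEVEL", "INFO")            # DEBUG -> INFO in 0.1.17
    max_retries = int(os.getenv("LLM_MAX_RETRIES", "1"))  # 2 -> 1 in 0.1.17
    logging.basicConfig(level=getattr(logging, log_level, logging.INFO))
    logging.info("LLM retries=%d", max_retries)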
{xgae-0.1.15 → xgae-0.1.17}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: xgae
- Version: 0.1.15
+ Version: 0.1.17
  Summary: Extreme General Agent Engine
  Requires-Python: >=3.13
  Requires-Dist: colorlog==6.9.0
{xgae-0.1.15 → xgae-0.1.17}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "xgae"
- version = "0.1.15"
+ version = "0.1.17"
  description = "Extreme General Agent Engine"
  readme = "README.md"
  requires-python = ">=3.13"
xgae-0.1.17/release.md ADDED
@@ -0,0 +1,34 @@
+ # Release Changelog
+ ## [0.1.17] - 2025-9-1
+ ### Target
+ - Saved for XGATaskEngine base version
+ ### Changed
+ - Delete StreamTaskResponser tool_exec_on_stream model code
+
+
+ ## [0.1.15] - 2025-9-1
+ ### Target
+ - Saved for StreamResponser tool_exec_on_stream mode, next release will be abolished
+ ### Changed
+ - Refact TaskResponseProcessor, XGATaskEngine
+ ### Fixed
+ - Fix finish_reason judge logic
+
+
+ ## [0.1.14] - 2025-8-31
+ ### Target
+ - First complete version is merged
+ ### Changed
+ - StreamTaskResponser first version
+
+ ## [0.1.10] - 2025-8-28
+ ### Target
+ - NonStream mode release is completed
+ ### Changed
+ - StreamTaskResponser is original
+ - NonStreamTaskResponser first version is completed
+ - Langfuse use 2.x, match for LiteLLM package
+
+ ## [0.1.7] - 2025-8-25
+ ### Target
+ - Langfuse use 3.x package
{xgae-0.1.15 → xgae-0.1.17}/src/xgae/cli_app.py
@@ -56,7 +56,7 @@ async def cli() -> None:
  trace_id = langfuse.trace(name="xgae_cli").trace_id

  final_result = await engine.run_task_with_final_answer(
- task_message={"role": "user", "content": user_message},
+ task_message={'role': "user", 'content': user_message},
  trace_id=trace_id
  )

@@ -65,7 +65,7 @@ async def cli() -> None:
  print(f"\n📌 ASK INFO: {final_result['content']}")
  user_message = get_user_message("Enter ASK information (or 'exit' to quit)")
  final_result = await engine.run_task_with_final_answer(
- task_message={"role": "user", "content": user_message},
+ task_message={'role': "user", 'content': user_message},
  trace_id=trace_id
  )

{xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/mcp_tool_box.py
@@ -78,7 +78,7 @@ class XGAMcpToolBox(XGAToolBox):
  async def destroy_task_tool_box(self, task_id: str):
  tool_schemas = self.get_task_tool_schemas(task_id, type="general_tool")
  if len(tool_schemas) > 0:
- await self.call_tool(task_id, "end_task", {"task_id": task_id})
+ await self.call_tool(task_id, "end_task", {'task_id': task_id})
  self.task_tool_schemas.pop(task_id, None)

  @override
@@ -118,7 +118,7 @@ class XGAMcpToolBox(XGAToolBox):
  if mcp_tool:
  tool_args = args or {}
  if server_name == self.GENERAL_MCP_SERVER_NAME:
- tool_args = dict({"task_id": task_id}, **tool_args)
+ tool_args = dict({'task_id': task_id}, **tool_args)
  is_general_tool = True

  try:
@@ -152,9 +152,9 @@ class XGAMcpToolBox(XGAToolBox):
  input_schema['properties'].pop("task_id", None)
  if 'task_id' in input_schema['required']:
  input_schema['required'].remove('task_id')
- params_properties = input_schema.get("properties", {})
+ params_properties = input_schema.get('properties', {})
  for param_properties in params_properties.values():
- param_properties.pop("title", None)
+ param_properties.pop('title', None)

  metadata = tool.metadata or {}
  tool_schema = XGAToolSchema(tool_name=tool.name,
@@ -173,10 +173,10 @@ class XGAMcpToolBox(XGAToolBox):
  def _load_mcp_servers_config(mcp_config_path: str) -> Dict[str, Any]:
  try:
  if os.path.exists(mcp_config_path):
- with open(mcp_config_path, 'r', encoding='utf-8') as f:
+ with open(mcp_config_path, 'r', encoding="utf-8") as f:
  server_config = json.load(f)

- for server_name, server_info in server_config["mcpServers"].items():
+ for server_name, server_info in server_config['mcpServers'].items():
  if "transport" not in server_info:
  if "url" in server_info:
  server_info['transport'] = "streamable_http" if "mcp" in server_info['url'] else "sse"
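The last hunk above infers a transport for MCP server entries that do not declare one: URL-based servers become "streamable_http" when the URL contains "mcp", otherwise "sse". A self-contained sketch of that normalization under the same JSON shape; the helper name here is illustrative, not xgae's API:

    import json
    from typing import Any, Dict

    def normalize_transports(config_text: str) -> Dict[str, Any]:
        # Mirrors the inference in the hunk above: fill a default 'transport'
        # for every server entry that declares a URL but no transport.
        config = json.loads(config_text)
        for server_info in config["mcpServers"].values():
            if "transport" not in server_info and "url" in server_info:
                server_info["transport"] = "streamable_http" if "mcp" in server_info["url"] else "sse"
        return config

    # A bare SSE-style URL gets 'sse'; an .../mcp endpoint would get 'streamable_http'.
    print(normalize_transports('{"mcpServers": {"xga": {"url": "http://localhost:8000/sse"}}}'))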
{xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/prompt_builder.py
@@ -40,7 +40,6 @@ class XGAPromptBuilder():

  openai_function['name'] = tool_schema.tool_name
  openai_function['description'] = tool_schema.description if tool_schema.description else 'No description available'
-
  openai_parameters = {}
  openai_function['parameters'] = openai_parameters

@@ -70,7 +69,7 @@ class XGAPromptBuilder():
  for tool_schema in tool_schemas:
  description = tool_schema.description if tool_schema.description else 'No description available'
  tool_info += f"- **{tool_schema.tool_name}**: {description}\n"
- parameters = tool_schema.input_schema.get("properties", {})
+ parameters = tool_schema.input_schema.get('properties', {})
  tool_info += f" Parameters: {parameters}\n"
  tool_prompt = tool_prompt.replace("{tool_schemas}", tool_info)

{xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/responser/non_stream_responser.py
@@ -20,7 +20,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
  llm_content = ""
  parsed_xml_data = []
  finish_reason = None
- llm_count = continuous_state.get("auto_continue_count")
+ auto_continue_count = continuous_state['auto_continue_count']

  try:
  if hasattr(llm_response, 'choices') and llm_response.choices:
@@ -40,13 +40,10 @@ class NonStreamTaskResponser(TaskResponseProcessor):
  parsed_xml_data = parsed_xml_data[:self.max_xml_tool_calls]
  finish_reason = "xml_tool_limit_reached"

- self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
+ self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({auto_continue_count})", level="DEFAULT",
  status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_exec_strategy}, "
  f"parsed_xml_data_len={len(parsed_xml_data)}, llm_content_len={len(llm_content)}")

- if len(llm_content) == 0:
- logging.warning(f"NonStreamResp: LLM response_message llm_content is empty")
-
  message_data = {"role": "assistant", "content": llm_content}
  assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
  yield assistant_msg
@@ -81,10 +78,10 @@ class NonStreamTaskResponser(TaskResponseProcessor):
  tool_index += 1
  else:
  finish_reason = "non_tool_call"
- logging.warning(f"NonStreamResp: tool_calls is empty, No Tool need to call !")
+ logging.warning(f"NonStreamResp: finish_reason='non_tool_call', No Tool need to call !")

  if finish_reason:
- finish_content = {"status_type": "finish", "finish_reason": finish_reason}
+ finish_content = {'status_type': "finish", 'finish_reason': finish_reason}
  finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
  yield finish_msg
  except Exception as e:
@@ -93,7 +90,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
  status_message=f"Process non-streaming response error: {e}",
  metadata={"content": llm_content, "trace": trace})

- content = {"role": "system", "status_type": "error", "message": f"Process non-streaming response error: {e}"}
+ content = {'role': "system", 'status_type': "error", 'message': f"Process non-streaming response error: {e}"}
  error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
  yield error_msg

{xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/responser/responser_base.py
@@ -29,7 +29,6 @@ class TaskResponserContext(TypedDict, total=False):
  max_xml_tool_calls: int # LLM generate max_xml_tool limit, 0 is no limit
  use_assistant_chunk_msg: bool
  tool_exec_strategy: ToolExecutionStrategy
- tool_exec_on_stream: bool
  xml_adding_strategy: XmlAddingStrategy
  add_response_msg_func: Callable
  create_response_msg_func: Callable
@@ -65,7 +64,6 @@ class TaskResponseProcessor(ABC):
  self.task_run_id = response_context['task_run_id']
  self.task_no = response_context['task_no']
  self.tool_exec_strategy = response_context['tool_exec_strategy']
- self.tool_exec_on_stream = response_context['tool_exec_on_stream']
  self.xml_adding_strategy = response_context['xml_adding_strategy']
  self.max_xml_tool_calls = response_context['max_xml_tool_calls']

@@ -265,7 +263,7 @@ class TaskResponseProcessor(ABC):
  function_name = tool_call['function_name']
  exec_tool_span = self.root_span.span(name=f"execute_tool.{function_name}", input=tool_call["arguments"])
  try:
- arguments = tool_call["arguments"]
+ arguments = tool_call.get('arguments', {})
  if isinstance(arguments, str):
  try:
  arguments = safe_json_parse(arguments)
@@ -321,7 +319,7 @@ class TaskResponseProcessor(ABC):
  logging.warning("TaskProcessor execute_tools_sequentially: tool_calls is empty")
  return []

- tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
+ tool_names = [tc['function_name'] for tc in tool_calls]
  tool_num = len(tool_calls)
  if tool_num > 1:
  logging.info(f"TaskProcessor execute_tools_sequentially: Executing {tool_num} tools sequentially: {tool_names}")
@@ -336,7 +334,7 @@ class TaskResponseProcessor(ABC):
  results.append((tool_call, result))

  # Check if this is a terminating tool (ask or complete)
- if tool_name in ['ask', 'complete']:
+ if tool_name in ["ask", "complete"]:
  if len(results) < tool_num:
  logging.info(f"TaskProcessor execute_tools_sequentially: Terminating tool '{tool_name}' executed, Stopping further tool execution.")
  self.root_span.event(name="task_process_terminate_tool_executed", level="DEFAULT",
@@ -360,7 +358,7 @@ class TaskResponseProcessor(ABC):
  logging.warning("TaskProcessor execute_tools_in_parallel: tool_calls is empty")
  return []

- tool_names = [t['function_name'] for t in tool_calls]
+ tool_names = [tc['function_name'] for tc in tool_calls]
  tool_num = len(tool_calls)
  if tool_num > 1:
  logging.info(f"TaskProcessor execute_tools_in_parallel: Executing {tool_num} tools sequentially: {tool_names}")
@@ -402,10 +400,10 @@ class TaskResponseProcessor(ABC):

  metadata = {}
  if assistant_message_id:
- metadata["assistant_message_id"] = assistant_message_id
+ metadata['assistant_message_id'] = assistant_message_id

  if parsing_details:
- metadata["parsing_details"] = parsing_details
+ metadata['parsing_details'] = parsing_details

  metadata['frontend_content'] = result_for_frontend

@@ -424,9 +422,8 @@ class TaskResponseProcessor(ABC):
  parsing_details: Optional[Dict[str, Any]] = None,
  for_llm: bool = False) -> Dict[str, Any]:
  function_name = tool_call['function_name']
- xml_tag_name = tool_call.get("xml_tag_name")
- arguments = tool_call.get("arguments", {})
- tool_call_id = tool_call.get("id")
+ xml_tag_name = tool_call['xml_tag_name']
+ arguments = tool_call.get('arguments', {})

  # Process the output - if it's a JSON string, parse it back to an object
  output = result.output
@@ -499,17 +496,17 @@ class TaskResponseProcessor(ABC):
  message_text = f"Tool {context.function_name} {'completed successfully' if context.result.success else 'failed'}"

  content = {
- "status_type" : status_type,
- "role" : "assistant",
- "function_name" : context.function_name,
- "xml_tag_name" : context.xml_tag_name,
- "message" : message_text,
- "tool_index" : context.tool_index
+ 'status_type' : status_type,
+ 'role' : "assistant",
+ 'function_name' : context.function_name,
+ 'xml_tag_name' : context.xml_tag_name,
+ 'message' : message_text,
+ 'tool_index' : context.tool_index
  }

  metadata = {}
  if tool_message_id:
- metadata["tool_result_message_id"] = tool_message_id
+ metadata['tool_result_message_id'] = tool_message_id

  return self.add_response_message(type="status", content=content, is_llm_message=False, metadata=metadata)

@@ -517,12 +514,12 @@ class TaskResponseProcessor(ABC):
  """Formats, saves, and returns a tool error status message."""
  error_msg = str(context.error) if context.error else "Tool execution unknown exception"
  content = {
- "status_type" : "tool_error",
- "role" : "assistant",
- "function_name" : context.function_name,
- "xml_tag_name" : context.xml_tag_name,
- "message" : f"Executing tool {context.function_name} exception: {error_msg}",
- "tool_index" : context.tool_index
+ 'status_type' : "tool_error",
+ 'role' : "assistant",
+ 'function_name' : context.function_name,
+ 'xml_tag_name' : context.xml_tag_name,
+ 'message' : f"Executing tool {context.function_name} exception: {error_msg}",
+ 'tool_index' : context.tool_index
  }

  return self.add_response_message(type="status", content=content, is_llm_message=False)
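The execute_tools_sequentially hunks above tighten the tool-name lookup and keep the early exit on terminating tools: once 'ask' or 'complete' runs, any remaining calls are skipped. A minimal sketch of that control flow, assuming nothing about xgae beyond the pattern shown (execute is a stand-in for the real tool runner):

    import asyncio
    from typing import Any, Awaitable, Callable, Dict, List, Tuple

    TERMINATING_TOOLS = ("ask", "complete")  # same pair the diff checks for

    async def run_sequentially(tool_calls: List[Dict[str, Any]],
                               execute: Callable[[Dict[str, Any]], Awaitable[Any]]
                               ) -> List[Tuple[Dict[str, Any], Any]]:
        results = []
        for tool_call in tool_calls:
            results.append((tool_call, await execute(tool_call)))
            if tool_call["function_name"] in TERMINATING_TOOLS:
                break  # later tools never run once the task is answered
        return results

    async def demo() -> None:
        async def execute(tool_call):  # stand-in tool runner
            return f"ran {tool_call['function_name']}"
        calls = [{"function_name": "search"}, {"function_name": "complete"},
                 {"function_name": "skipped"}]
        print(await run_sequentially(calls, execute))  # 'skipped' is never executed

    asyncio.run(demo())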
xgae-0.1.17/src/xgae/engine/responser/stream_responser.py ADDED
@@ -0,0 +1,137 @@
+ import logging
+ import asyncio
+ from typing import List, Dict, Any, Optional, AsyncGenerator, override
+
+ from xgae.utils import log_trace
+
+ from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
+
+
+ class StreamTaskResponser(TaskResponseProcessor):
+ def __init__(self, response_context: TaskResponserContext):
+ super().__init__(response_context)
+
+ @override
+ async def process_response(self,
+ llm_response: AsyncGenerator,
+ prompt_messages: List[Dict[str, Any]],
+ continuous_state: TaskRunContinuousState
+ ) -> AsyncGenerator[Dict[str, Any], None]:
+ accumulated_content = continuous_state['accumulated_content']
+ auto_continue_count = continuous_state['auto_continue_count']
+ can_auto_continue = continuous_state['auto_continue']
+ msg_sequence = continuous_state['assistant_msg_sequence']
+
+ use_assistant_chunk_msg = self.response_context["use_assistant_chunk_msg"]
+
+ finish_reason = None
+ should_auto_continue = False
+
+ logging.info(f"=== StreamResp:Start Process Response, assistant_msg_sequence={msg_sequence}, "
+ f"accumulated_content_len={len(accumulated_content)}")
+ try:
+ async for llm_chunk in llm_response:
+ if hasattr(llm_chunk, 'choices') and llm_chunk.choices and hasattr(llm_chunk.choices[0],'finish_reason'):
+ if llm_chunk.choices[0].finish_reason:
+ finish_reason = llm_chunk.choices[0].finish_reason # LLM finish reason: 'stop' , 'length'
+ logging.info(f"StreamResp:LLM chunk response finish_reason={finish_reason}")
+
+ if hasattr(llm_chunk, 'choices') and llm_chunk.choices:
+ llm_chunk_msg = llm_chunk.choices[0].delta if hasattr(llm_chunk.choices[0], 'delta') else None
+
+ if llm_chunk_msg and hasattr(llm_chunk_msg, 'content') and llm_chunk_msg.content:
+ chunk_content = llm_chunk_msg.content
+ accumulated_content += chunk_content
+
+ xml_tool_call_count = len(self._extract_xml_chunks(accumulated_content))
+ if self.max_xml_tool_calls <= 0 or xml_tool_call_count <= self.max_xml_tool_calls:
+ if use_assistant_chunk_msg:
+ message_data = {"role": "assistant", "content": chunk_content}
+ metadata = {"sequence": msg_sequence}
+ assistant_chunk_msg = self.create_response_message(type="assistant_chunk",
+ content=message_data,
+ is_llm_message=True,
+ metadata=metadata)
+ yield assistant_chunk_msg
+ msg_sequence += 1
+ else:
+ finish_reason = "xml_tool_limit_reached"
+ logging.warning(f"StreamResp: Over XML Tool Limit, finish_reason='xml_tool_limit_reached', "
+ f"xml_tool_call_count={xml_tool_call_count}")
+ break
+
+ if finish_reason == "xml_tool_limit_reached":
+ xml_chunks = self._extract_xml_chunks(accumulated_content)
+ if len(xml_chunks) > self.max_xml_tool_calls:
+ limited_chunks = xml_chunks[:self.max_xml_tool_calls]
+ if limited_chunks:
+ last_chunk = limited_chunks[-1]
+ last_chunk_pos = accumulated_content.find(last_chunk) + len(last_chunk)
+ accumulated_content = accumulated_content[:last_chunk_pos]
+
+ parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)
+ should_auto_continue = (can_auto_continue and finish_reason == 'length')
+
+ self.root_span.event(name=f"stream_processor_start[{self.task_no}]({auto_continue_count})", level="DEFAULT",
+ status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_exec_strategy}, "
+ f"parsed_xml_data_len={len(parsed_xml_data)}, accumulated_content_len={len(accumulated_content)}, "
+ f"should_auto_continue={should_auto_continue}")
+
+ assistant_msg = None
+ if accumulated_content and not should_auto_continue:
+ message_data = {"role": "assistant", "content": accumulated_content}
+ assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
+ yield assistant_msg
+
+ assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
+
+ tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
+ if len(tool_calls_to_execute) > 0 and not should_auto_continue:
+ tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_exec_strategy)
+ tool_index = 0
+ for i, (returned_tool_call, tool_result) in enumerate(tool_results):
+ parsed_xml_item = parsed_xml_data[i]
+ tool_call = parsed_xml_item['tool_call']
+ parsing_details = parsed_xml_item['parsing_details']
+
+ tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id,parsing_details, tool_result)
+
+ tool_start_msg = self._add_tool_start_message(tool_context)
+ yield tool_start_msg
+
+ tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy,assistant_msg_id, parsing_details)
+
+ tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
+ yield tool_completed_msg
+
+ yield tool_message
+
+ if tool_context.function_name in ['ask', 'complete']:
+ finish_reason = "completed"
+ break
+
+ tool_index += 1
+ else:
+ finish_reason = "non_tool_call"
+ logging.warning(f"StreamResp: finish_reason='non_tool_call', No Tool need to call !")
+
+ if finish_reason:
+ finish_content = {'status_type': "finish", 'finish_reason': finish_reason}
+ finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
+ yield finish_msg
+ except Exception as e:
+ trace = log_trace(e, f"StreamResp: Process response accumulated_content:\n {accumulated_content}")
+ self.root_span.event(name="stream_response_process_error", level="ERROR",
+ status_message=f"Process streaming response error: {e}",
+ metadata={"content": accumulated_content, "trace": trace})
+
+ content = {'role': "system", 'status_type': "error", 'message': f"Process streaming response error: {e}"}
+ error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
+ yield error_msg
+
+ raise # Use bare 'raise' to preserve the original exception with its traceback
+ finally:
+ if should_auto_continue:
+ continuous_state['accumulated_content'] = accumulated_content
+ continuous_state['assistant_msg_sequence'] = msg_sequence
+ logging.warning(f"StreamResp: Updated continuous state for auto-continue with {len(accumulated_content)} chars")
{xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/task_engine.py
@@ -152,7 +152,7 @@ class XGATaskEngine:
  logging.error(f"TaskEngine run_task_auto: task_response error: {chunk.get('message')}")
  auto_continue = False
  break
- elif status_type == 'finish':
+ elif status_type == "finish":
  finish_reason = status_content['finish_reason']
  if finish_reason == "completed":
  logging.info(f"TaskEngine run_task_auto: Detected finish_reason='completed', TASK_COMPLETE Success !")
@@ -167,8 +167,8 @@ class XGATaskEngine:
  auto_continue = False
  break
  elif finish_reason in ["stop", "length"]: # 'length' occur on some LLM
- auto_continue = True
  auto_continue_count += 1
+ auto_continue = True if auto_continue_count < self.max_auto_run else False
  update_continuous_state(auto_continue_count, auto_continue)
  logging.info(f"TaskEngine run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{self.max_auto_run})")
  except Exception as parse_error:
@@ -177,7 +177,7 @@ class XGATaskEngine:
  status_message=f"Task Engine parse chunk error: {parse_error}",
  metadata={"content": chunk, "trace": trace})

- status_content = {"status_type": "error", "role": "system", "message": "Parse response chunk Error"}
+ status_content = {'status_type': "error", 'role': "system", 'message': "Parse response chunk Error"}
  error_msg = self.add_response_message(type="status", content=status_content, is_llm_message=False)
  yield error_msg
  except Exception as run_error:
@@ -186,7 +186,7 @@ class XGATaskEngine:
  status_message=f"Call task_run_once error: {run_error}",
  metadata={"trace": trace})

- status_content = {"status_type": "error", "role": "system", "message": "Call run_task_once error"}
+ status_content = {'status_type': "error", 'role': "system", 'message': "Call run_task_once error"}
  error_msg = self.add_response_message(type="status", content=status_content, is_llm_message=False)
  yield error_msg

@@ -246,10 +246,10 @@ class XGATaskEngine:
  # finish reason 1) 'stop': auto run reach max_auto_run limit 2) 'xml_tool_limit_reached' 3) 'length': occur on some LLM
  tool_result = tool_execution.get("result", None)
  if tool_result is not None:
- success = tool_result.get('success')
+ success = tool_result['success']
  output = tool_result.get('output', '')
  result_type = "answer" if success else "error"
- result_content = f"Task execute '{tool_name}' {result_type}: {output}"
+ result_content = output
  final_result = XGATaskResult(type=result_type, content=result_content)
  elif chunk_type == "assistant" and finish_reason == "non_tool_call":
  assis_content = chunk['content']
@@ -345,7 +345,6 @@ class XGATaskEngine:
  'max_xml_tool_calls' : 0,
  'use_assistant_chunk_msg' : self.use_assistant_chunk_msg,
  'tool_exec_strategy' : "parallel" if self.tool_exec_parallel else "sequential",
- 'tool_exec_on_stream' : True,
  'xml_adding_strategy' : "assistant_message", # user_message
  'add_response_msg_func' : self.add_response_message,
  'create_response_msg_func' : self.create_response_message,
@@ -404,10 +403,10 @@ if __name__ == "__main__":
  custom_tools=["*"],
  system_prompt=system_prompt,
  session_id="session_1",
- agent_id="agent_1",)
-
- final_result = await engine.run_task_with_final_answer(task_message={'role': "user",
- 'content': "locate 10.0.0.1 fault and solution"})
+ agent_id="agent_1"
+ )
+ user_input = "locate 10.0.0.1 fault and solution"
+ final_result = await engine.run_task_with_final_answer(task_message={'role': "user", 'content': user_input})
  print(f"FINAL RESULT:{final_result}")

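The @@ -167 hunk is the core of the "finish_reason judge logic" fix named in release.md: the counter is now incremented before the comparison, so auto-continue shuts off once max_auto_run is reached instead of being set unconditionally. A small sketch of the bounded loop; the names here are illustrative, not xgae's API:

    MAX_AUTO_RUN = 3  # stand-in for self.max_auto_run

    def judge(finish_reason: str, count: int) -> tuple[bool, int]:
        # Count first, then compare, exactly as the fixed hunk does.
        if finish_reason in ("stop", "length"):
            count += 1
            return count < MAX_AUTO_RUN, count
        return False, count

    auto_continue, count = True, 0
    while auto_continue:
        auto_continue, count = judge("length", count)
    print(count)  # 3: the loop stops at the limit instead of spinning forever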
{xgae-0.1.15 → xgae-0.1.17}/src/xgae/engine/task_langfuse.py
@@ -42,7 +42,7 @@ class XGATaskLangFuse:
  trace = XGATaskLangFuse.langfuse.trace(name="xga_task_engine")
  self.trace_id = trace.id

- metadata = {"task_id": self.task_id, "session_id": self.session_id, "agent_id": self.agent_id}
+ metadata = {'task_id': self.task_id, 'session_id': self.session_id, 'agent_id': self.agent_id}
  self.root_span = trace.span(id=self.task_run_id, name=root_span_name, input=task_message,metadata=metadata)
  self.root_span_name = root_span_name

{xgae-0.1.15 → xgae-0.1.17}/src/xgae/tools/without_general_tools_app.py
@@ -32,11 +32,13 @@ async def ask(task_id: str,
  print(f"<XGAETools-ask>: task_id={task_id}, text={text}, attachments={attachments}")
  return XGAToolResult(success=True, output=str({"status": "Awaiting user response..."}))

+
  @mcp.tool(
  description="end task, destroy sandbox"
  )
  async def end_task(task_id: str) :
  print(f"<XGAETools-end_task> task_id: {task_id}")
+ return XGAToolResult(success=True, output="")

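end_task previously fell off the end of the function and returned None; it now returns an empty-but-successful XGAToolResult, so callers such as destroy_task_tool_box always receive a structured result. A sketch of why that matters, with a dataclass standing in for the real XGAToolResult:

    import asyncio
    from dataclasses import dataclass

    @dataclass
    class ToolResult:  # stand-in for xgae's XGAToolResult
        success: bool
        output: str

    async def end_task(task_id: str) -> ToolResult:
        print(f"<XGAETools-end_task> task_id: {task_id}")
        return ToolResult(success=True, output="")  # callers can now test .success

    result = asyncio.run(end_task("task_1"))
    assert result.success  # would raise AttributeError if end_task returned None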
{xgae-0.1.15 → xgae-0.1.17}/src/xgae/utils/xml_tool_parser.py
@@ -6,11 +6,11 @@ the XML format with structured function_calls blocks.
  """

  import json
- import logging
  import re
  from dataclasses import dataclass
  from typing import List, Dict, Any, Optional, Tuple

+ from xgae.utils import log_trace

  @dataclass
  class XMLToolCall:
@@ -82,7 +82,7 @@ class XMLToolParser:
  if tool_call:
  tool_calls.append(tool_call)
  except Exception as e:
- logging.error(f"Error parsing invoke block for {function_name}: {e}")
+ log_trace(e, f"XMLToolParser: Error parsing function={function_name}, invoke_content:\n{invoke_content}")

  return tool_calls

{xgae-0.1.15 → xgae-0.1.17}/uv.lock
@@ -1257,7 +1257,7 @@ wheels = [

  [[package]]
  name = "xgae"
- version = "0.1.15"
+ version = "0.1.16"
  source = { editable = "." }
  dependencies = [
  { name = "colorlog" },
xgae-0.1.15/release.md DELETED
@@ -1,12 +0,0 @@
- # Relese Changelog
- ## [0.1.15] - 2025-9-1
- ### Target
- - Saved for StreamResponser tool_exec_on_stream mode, next release will be abolished
- ### Changed
- - Improve French translation (#377).
- ### Fixed
- - Fix finish_reason judge logic
-
- ## [0.1.15] - 2025-8-31
- ### Target
- - First complete version
xgae-0.1.15/src/xgae/engine/responser/stream_responser.py DELETED
@@ -1,256 +0,0 @@
- import logging
- import asyncio
- from typing import List, Dict, Any, Optional, AsyncGenerator, override
-
- from xgae.utils import log_trace
-
- from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
-
-
- class StreamTaskResponser(TaskResponseProcessor):
- def __init__(self, response_context: TaskResponserContext):
- super().__init__(response_context)
-
- @override
- async def process_response(self,
- llm_response: AsyncGenerator,
- prompt_messages: List[Dict[str, Any]],
- continuous_state: TaskRunContinuousState
- ) -> AsyncGenerator[Dict[str, Any], None]:
- accumulated_content = continuous_state['accumulated_content']
- auto_continue_count = continuous_state['auto_continue_count']
- can_auto_continue = continuous_state['auto_continue']
- msg_sequence = continuous_state['assistant_msg_sequence']
-
- use_assistant_chunk_msg = self.response_context["use_assistant_chunk_msg"]
-
- finish_reason = None
- should_auto_continue = False
-
- pending_tool_executions = []
- yielded_tool_indices = set() # Track which tool statuses have been yielded
- tool_results_buffer = [] # Store (tool_call, result, tool_index, context)
- tool_index = 0
-
- current_xml_content = accumulated_content # Track XML content for streaming detection
-
- logging.info(f"=== StreamResp:tool_exec_on_stream={self.tool_exec_on_stream}, auto_continue_count={auto_continue_count}, "
- f"accumulated_content_len={len(accumulated_content)}")
- try:
- async for llm_chunk in llm_response:
- if hasattr(llm_chunk, 'choices') and llm_chunk.choices and hasattr(llm_chunk.choices[0],'finish_reason'):
- if llm_chunk.choices[0].finish_reason:
- finish_reason = llm_chunk.choices[0].finish_reason # LLM finish reason: 'stop' , 'length'
- logging.info(f"StreamResp:LLM chunk response finish_reason={finish_reason}")
-
- if hasattr(llm_chunk, 'choices') and llm_chunk.choices:
- llm_chunk_msg = llm_chunk.choices[0].delta if hasattr(llm_chunk.choices[0], 'delta') else None
-
- if llm_chunk_msg and hasattr(llm_chunk_msg, 'content') and llm_chunk_msg.content:
- chunk_content = llm_chunk_msg.content
- accumulated_content += chunk_content
- current_xml_content += chunk_content #Track streaming XML content
-
- xml_tool_call_count = len(self._extract_xml_chunks(accumulated_content))
- if self.max_xml_tool_calls <= 0 or xml_tool_call_count < self.max_xml_tool_calls:
- if use_assistant_chunk_msg:
- message_data = {"role": "assistant", "content": chunk_content}
- metadata = {"sequence": msg_sequence}
- assistant_chunk_msg = self.create_response_message(type="assistant_chunk",
- content=message_data,
- is_llm_message=True,
- metadata=metadata)
- yield assistant_chunk_msg
- msg_sequence += 1
-
- #Process XML tool calls during streaming
- if self.tool_exec_on_stream:
- xml_chunks = self._extract_xml_chunks(current_xml_content)
- for xml_chunk in xml_chunks:
- current_xml_content = current_xml_content.replace(xml_chunk, "", 1)
- result = self._parse_xml_tool_call(xml_chunk)
- if result:
- tool_call, parsing_details = result
-
- # Create tool context for streaming execution
- tool_context = self._create_tool_context(tool_call, tool_index, None, parsing_details)
-
- # Yield tool start status immediately
- tool_start_msg = self._add_tool_start_message(tool_context)
- yield tool_start_msg
- yielded_tool_indices.add(tool_index)
-
- # Create async execution task
- execution_task = asyncio.create_task(self._execute_tool(tool_call))
- pending_tool_executions.append({'task': execution_task,
- 'tool_call': tool_call,
- 'tool_index': tool_index,
- 'tool_context': tool_context,
- 'parsing_details': parsing_details})
- tool_index += 1
- else:
- finish_reason = "xml_tool_limit_reached"
- logging.warning(f"StreamResp: Over XML Tool Limit, finish_reason='xml_tool_limit_reached', "
- f"xml_tool_call_count={xml_tool_call_count}")
- break
- # for chunk is end
-
- if len(accumulated_content) == 0:
- logging.warning(f"StreamResp: LLM response_message content is empty")
-
- # Wait for pending tool executions from streaming phase
- if pending_tool_executions:
- logging.info(f"StreamResp: Waiting for {len(pending_tool_executions)} pending streamed tool executions")
-
- pending_tasks = [execution['task'] for execution in pending_tool_executions]
- done, _ = await asyncio.wait(pending_tasks)
-
- for pend_tool_exec in pending_tool_executions:
- pend_tool_index = pend_tool_exec['tool_index']
- pend_tool_context = pend_tool_exec['tool_context']
-
- try:
- if pend_tool_exec["task"].done():
- result = pend_tool_exec['task'].result()
- pend_tool_context.result = result
- tool_results_buffer.append((pend_tool_exec["tool_call"], result, pend_tool_index, pend_tool_context))
- else:
- logging.warning(f"StreamResp: Task for tool index {pend_tool_index} is not done after wait.")
- except Exception as e:
- logging.error(f"StreamResp: Error getting result for pending tool execution {pend_tool_index}: {str(e)}")
- pend_tool_context.error = e
-
- if finish_reason == "xml_tool_limit_reached":
- xml_chunks = self._extract_xml_chunks(accumulated_content)
- if len(xml_chunks) > self.max_xml_tool_calls:
- limited_chunks = xml_chunks[:self.max_xml_tool_calls]
- if limited_chunks:
- last_chunk = limited_chunks[-1]
- last_chunk_pos = accumulated_content.find(last_chunk) + len(last_chunk)
- accumulated_content = accumulated_content[:last_chunk_pos]
-
- parsed_xml_data = self._parse_xml_tool_calls(accumulated_content)
- should_auto_continue = (can_auto_continue and finish_reason == 'length')
-
- self.root_span.event(name=f"stream_processor_start[{self.task_no}]({auto_continue_count})", level="DEFAULT",
- status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_exec_strategy}, "
- f"parsed_xml_data_len={len(parsed_xml_data)}, accumulated_content_len={len(accumulated_content)}, "
- f"should_auto_continue={should_auto_continue}, pending_executions_len={len(pending_tool_executions)}")
-
- assistant_msg = None
- if accumulated_content and not should_auto_continue:
- message_data = {"role": "assistant", "content": accumulated_content}
- assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
- yield assistant_msg
-
- # Process results from both streaming and non-streaming executions
- tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
-
- # Update assistant_message_id for streaming tool contexts
- assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
- for pend_tool_exec in pending_tool_executions:
- if not pend_tool_exec["tool_context"].assistant_message_id:
- pend_tool_exec["tool_context"].assistant_message_id = assistant_msg_id
-
- if len(tool_calls_to_execute) > 0:
- if self.tool_exec_on_stream:
- # Handle results from streaming executions + any remaining tools
- remaining_tools = []
- streamed_tool_indices = set()
-
- # Identify which tools were already executed during streaming by index
- for pend_tool_exec in pending_tool_executions:
- streamed_tool_indices.add(pend_tool_exec["tool_index"])
-
- # Find remaining tools that weren't executed during streaming
- for i, parsed_item in enumerate(parsed_xml_data):
- tool_call = parsed_item['tool_call']
- tool_identifier = (tool_call.get('function_name', ''), str(tool_call.get('arguments', {})))
-
- # Check if this tool was already executed during streaming
- already_executed = False
- for pend_tool_exec in pending_tool_executions:
- exec_tool_call = pend_tool_exec["tool_call"]
- exec_identifier = (exec_tool_call.get('function_name', ''),str(exec_tool_call.get('arguments', {})))
- if tool_identifier == exec_identifier:
- already_executed = True
- break
-
- if not already_executed:
- remaining_tools.append((parsed_item['tool_call'], parsed_item['parsing_details'], tool_index))
- tool_index += 1
-
- # Execute remaining tools if any
- if remaining_tools:
- for tool_call, parsing_details, t_idx in remaining_tools:
- tool_context = self._create_tool_context(tool_call, t_idx, assistant_msg_id,parsing_details)
-
- tool_start_msg = self._add_tool_start_message(tool_context)
- yield tool_start_msg
-
- result = await self._execute_tool(tool_call)
- tool_context.result = result
- tool_results_buffer.append((tool_call, result, t_idx, tool_context))
-
- # Process all tool results
- for tool_call, result, t_idx, pend_tool_context in tool_results_buffer:
- tool_message = self._add_tool_messsage(tool_call, result, self.xml_adding_strategy,assistant_msg_id,
- getattr(pend_tool_context, 'parsing_details', None))
-
- tool_completed_msg = self._add_tool_completed_message(pend_tool_context,tool_message['message_id'] if tool_message else None)
- yield tool_completed_msg
-
- yield tool_message
-
- if pend_tool_context.function_name in ['ask', 'complete']:
- finish_reason = "completed"
- break
- else: # non-streaming execution
- tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_exec_strategy)
- tool_index = 0
- for i, (returned_tool_call, tool_result) in enumerate(tool_results):
- parsed_xml_item = parsed_xml_data[i]
- tool_call = parsed_xml_item['tool_call']
- parsing_details = parsed_xml_item['parsing_details']
-
- tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id,parsing_details, tool_result)
-
- tool_start_msg = self._add_tool_start_message(tool_context)
- yield tool_start_msg
-
- tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy,assistant_msg_id, parsing_details)
-
- tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
- yield tool_completed_msg
-
- yield tool_message
-
- if tool_context.function_name in ['ask', 'complete']:
- finish_reason = "completed"
- break
-
- tool_index += 1
- else:
- finish_reason = "non_tool_call"
- logging.warning(f"StreamResp: tool_calls is empty, No Tool need to call !")
-
- if finish_reason:
- finish_content = {"status_type": "finish", "finish_reason": finish_reason}
- finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
- yield finish_msg
- except Exception as e:
- trace = log_trace(e, f"StreamResp: Process response accumulated_content:\n {accumulated_content}")
- self.root_span.event(name="stream_response_process_error", level="ERROR",
- status_message=f"Process streaming response error: {e}",
- metadata={"content": accumulated_content, "trace": trace})
-
- content = {"role": "system", "status_type": "error", "message": f"Process streaming response error: {e}"}
- error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
- yield error_msg
-
- raise # Use bare 'raise' to preserve the original exception with its traceback
- finally:
- if should_auto_continue:
- continuous_state['accumulated_content'] = accumulated_content
- continuous_state['assistant_msg_sequence'] = msg_sequence
- logging.warning(f"StreamResp: Updated continuous state for auto-continue with {len(accumulated_content)} chars")