xgae-0.1.4.tar.gz → xgae-0.1.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xgae might be problematic.

Files changed (35)
  1. {xgae-0.1.4 → xgae-0.1.5}/.idea/workspace.xml +2 -2
  2. {xgae-0.1.4 → xgae-0.1.5}/PKG-INFO +1 -1
  3. {xgae-0.1.4 → xgae-0.1.5}/pyproject.toml +1 -1
  4. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/responser/xga_non_stream_responser.py +5 -2
  5. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/responser/xga_responser_base.py +2 -43
  6. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/responser/xga_stream_responser.py +44 -1
  7. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/xga_base.py +8 -8
  8. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/xga_engine.py +126 -57
  9. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/xga_mcp_tool_box.py +1 -4
  10. {xgae-0.1.4 → xgae-0.1.5}/.env +0 -0
  11. {xgae-0.1.4 → xgae-0.1.5}/.idea/.gitignore +0 -0
  12. {xgae-0.1.4 → xgae-0.1.5}/.idea/inspectionProfiles/Project_Default.xml +0 -0
  13. {xgae-0.1.4 → xgae-0.1.5}/.idea/inspectionProfiles/profiles_settings.xml +0 -0
  14. {xgae-0.1.4 → xgae-0.1.5}/.idea/misc.xml +0 -0
  15. {xgae-0.1.4 → xgae-0.1.5}/.idea/modules.xml +0 -0
  16. {xgae-0.1.4 → xgae-0.1.5}/.idea/vcs.xml +0 -0
  17. {xgae-0.1.4 → xgae-0.1.5}/.idea/xgae.iml +0 -0
  18. {xgae-0.1.4 → xgae-0.1.5}/.python-version +0 -0
  19. {xgae-0.1.4 → xgae-0.1.5}/README.md +0 -0
  20. {xgae-0.1.4 → xgae-0.1.5}/mcpservers/custom_servers.json +0 -0
  21. {xgae-0.1.4 → xgae-0.1.5}/mcpservers/xga_server.json +0 -0
  22. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/__init__.py +0 -0
  23. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/xga_prompt_builder.py +0 -0
  24. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/utils/json_helpers.py +0 -0
  25. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/utils/llm_client.py +0 -0
  26. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/utils/setup_env.py +0 -0
  27. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/utils/utils.py +0 -0
  28. {xgae-0.1.4 → xgae-0.1.5}/src/xgae/utils/xml_tool_parser.py +0 -0
  29. {xgae-0.1.4 → xgae-0.1.5}/templates/custom_tool_prompt_template.txt +0 -0
  30. {xgae-0.1.4 → xgae-0.1.5}/templates/gemini_system_prompt_template.txt +0 -0
  31. {xgae-0.1.4 → xgae-0.1.5}/templates/general_tool_prompt_template.txt +0 -0
  32. {xgae-0.1.4 → xgae-0.1.5}/templates/scp_test_prompt.txt +0 -0
  33. {xgae-0.1.4 → xgae-0.1.5}/templates/system_prompt_response_sample.txt +0 -0
  34. {xgae-0.1.4 → xgae-0.1.5}/templates/system_prompt_template.txt +0 -0
  35. {xgae-0.1.4 → xgae-0.1.5}/uv.lock +0 -0
{xgae-0.1.4 → xgae-0.1.5}/.idea/workspace.xml
@@ -151,7 +151,7 @@
  <workItem from="1755611972189" duration="13340000" />
  <workItem from="1755668525673" duration="14877000" />
  <workItem from="1755700523844" duration="24000" />
- <workItem from="1755737435202" duration="24716000" />
+ <workItem from="1755737435202" duration="48139000" />
  </task>
  <servers />
  </component>
@@ -172,7 +172,7 @@
  <component name="com.intellij.coverage.CoverageDataManagerImpl">
  <SUITE FILE_PATH="coverage/xgae$xga_engine.coverage" NAME="xga_engine Coverage Results" MODIFIED="1755580277172" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$xga_prompt_builder.coverage" NAME="xga_prompt_builder Coverage Results" MODIFIED="1755587456555" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
- <SUITE FILE_PATH="coverage/xgae$run_xga_engine.coverage" NAME="run_xga_engine Coverage Results" MODIFIED="1755768463510" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+ <SUITE FILE_PATH="coverage/xgae$run_xga_engine.coverage" NAME="run_xga_engine Coverage Results" MODIFIED="1755854764155" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$utils.coverage" NAME="utils Coverage Results" MODIFIED="1755226923439" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$setup_env.coverage" NAME="setup_env Coverage Results" MODIFIED="1755657717310" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  <SUITE FILE_PATH="coverage/xgae$xga_mcp_tool_box.coverage" NAME="xga_mcp_tool_box Coverage Results" MODIFIED="1755583099719" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
{xgae-0.1.4 → xgae-0.1.5}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: xgae
- Version: 0.1.4
+ Version: 0.1.5
  Summary: Extreme General Agent Engine
  Requires-Python: >=3.13
  Requires-Dist: colorlog>=6.9.0
{xgae-0.1.4 → xgae-0.1.5}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "xgae"
- version = "0.1.4"
+ version = "0.1.5"
  description = "Extreme General Agent Engine"
  readme = "README.md"
  requires-python = ">=3.13"
{xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/responser/xga_non_stream_responser.py
@@ -40,7 +40,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
  type="status", content=start_content,
  is_llm_message=False, metadata={"thread_run_id": thread_run_id}
  )
- if start_msg_obj: yield format_for_yield(start_msg_obj)
+ # if start_msg_obj: yield format_for_yield(start_msg_obj)

  # Extract finish_reason, content, tool calls
  if hasattr(llm_response, 'choices') and llm_response.choices:
@@ -157,6 +157,9 @@ class NonStreamTaskResponser(TaskResponseProcessor):
  self.trace.event(name="failed_to_save_tool_result_for_index", level="ERROR",
  status_message=(f"Failed to save tool result for index {tool_index}"))

+ if completed_msg_obj["metadata"].get("agent_should_terminate") == "true":
+ finish_reason = "completed"
+ break
  tool_index += 1

  # --- Save and Yield Final Status ---
@@ -209,5 +212,5 @@ class NonStreamTaskResponser(TaskResponseProcessor):
  type="status", content=end_content,
  is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
  )
- if end_msg_obj: yield format_for_yield(end_msg_obj)
+ #if end_msg_obj: yield format_for_yield(end_msg_obj)
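Note: the new agent_should_terminate check above ends the tool loop early and forces finish_reason to "completed". A minimal sketch of that control flow, where completed_msgs is a hypothetical stand-in for the tool-result message objects built inside NonStreamTaskResponser's loop:

    # Sketch only: completed_msgs stands in for the saved tool-result messages.
    def resolve_finish_reason(completed_msgs):
        finish_reason = "stop"
        for msg in completed_msgs:
            # The flag is stored as the string "true", not a boolean.
            if msg["metadata"].get("agent_should_terminate") == "true":
                finish_reason = "completed"
                break
        return finish_reason

    print(resolve_finish_reason([{"metadata": {"agent_should_terminate": "true"}}]))  # -> completed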
{xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/responser/xga_responser_base.py
@@ -5,7 +5,7 @@ from dataclasses import dataclass
  from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable,TypedDict,AsyncGenerator
  from abc import ABC, abstractmethod

- from xgae.engine.xga_base import XGAToolResult, XGAContextMsg, XGAToolBox
+ from xgae.engine.xga_base import XGAToolResult, XGAResponseMsg, XGAToolBox
  # from xgae.utils.setup_env import langfuse
  from xgae.utils.json_helpers import (
  safe_json_parse,
@@ -26,7 +26,7 @@ class TaskResponseContext(TypedDict, total=False):
  trace_id: str
  model_name: str
  max_xml_tool_calls: int
- add_context_msg: Callable
+ add_response_msg_func: Callable
  tool_box: XGAToolBox
  tool_execution_strategy: Literal["sequential", "parallel"]
  xml_adding_strategy: Literal["user_message", "assistant_message", "inline_edit"]
@@ -49,7 +49,6 @@ class Trace:
  return Span()


-
  @dataclass
  class ToolExecutionContext:
  """Context for a tool execution including call details, result, and display info."""
@@ -63,46 +62,6 @@ class ToolExecutionContext:
  parsing_details: Optional[Dict[str, Any]] = None


- @dataclass
- class ProcessorConfig:
- """
- Configuration for response processing and tool execution.
-
- This class controls how the LLM's responses are processed, including how tool calls
- are detected, executed, and their results handled.
-
- Attributes:
- xml_tool_calling: Enable XML-based tool call detection (<tool>...</tool>)
- native_tool_calling: Enable OpenAI-style function calling format
- execute_tools: Whether to automatically execute detected tool calls
- execute_on_stream: For streaming, execute tools as they appear vs. at the end
- tool_execution_strategy: How to execute multiple tools ("sequential" or "parallel")
- xml_adding_strategy: How to add XML tool results to the conversation
- max_xml_tool_calls: Maximum number of XML tool calls to process (0 = no limit)
- """
-
- xml_tool_calling: bool = True
- native_tool_calling: bool = False
-
- execute_tools: bool = True
- execute_on_stream: bool = False
- tool_execution_strategy: ToolExecutionStrategy = "sequential"
- xml_adding_strategy: XmlAddingStrategy = "assistant_message"
- max_xml_tool_calls: int = 0 # 0 means no limit
-
- def __post_init__(self):
- """Validate configuration after initialization."""
- if self.xml_tool_calling is False and self.native_tool_calling is False and self.execute_tools:
- raise ValueError(
- "At least one tool calling format (XML or native) must be enabled if execute_tools is True")
-
- if self.xml_adding_strategy not in ["user_message", "assistant_message", "inline_edit"]:
- raise ValueError("xml_adding_strategy must be 'user_message', 'assistant_message', or 'inline_edit'")
-
- if self.max_xml_tool_calls < 0:
- raise ValueError("max_xml_tool_calls must be a non-negative integer (0 = no limit)")
-
-
  class TaskResponseProcessor(ABC):
  def __init__(self, response_context: TaskResponseContext):
  self.response_context = response_context
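Note: TaskResponseContext is a TypedDict with total=False, so engines build it as a plain dict, and the key rename from add_context_msg to add_response_msg_func has to be mirrored by every caller (xga_engine.py does this further down). A minimal sketch with placeholder values; the lambda stands in for the engine's real add_response_msg callback:

    # Sketch only: all values are placeholders for illustration.
    response_context = {
        "trace_id": "trace-0",
        "model_name": "some-model",
        "max_xml_tool_calls": 0,
        "add_response_msg_func": lambda **kwargs: None,
        "tool_execution_strategy": "parallel",
        "xml_adding_strategy": "user_message",
    }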
{xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/responser/xga_stream_responser.py
@@ -2,15 +2,58 @@ import asyncio
  import json
  import logging
  import uuid
+
+ from dataclasses import dataclass
  from datetime import datetime, timezone
  from typing import List, Dict, Any, Optional, AsyncGenerator, override

- from xgae.engine.responser.xga_responser_base import TaskResponseProcessor, ProcessorConfig, TaskResponseContext,TaskRunContinuousState
+ from xgae.engine.responser.xga_responser_base import TaskResponseProcessor, ToolExecutionStrategy, XmlAddingStrategy, TaskResponseContext,TaskRunContinuousState
  from xgae.utils.json_helpers import (
  ensure_dict, safe_json_parse,
  to_json_string, format_for_yield
  )

+ @dataclass
+ class ProcessorConfig:
+ """
+ Configuration for response processing and tool execution.
+
+ This class controls how the LLM's responses are processed, including how tool calls
+ are detected, executed, and their results handled.
+
+ Attributes:
+ xml_tool_calling: Enable XML-based tool call detection (<tool>...</tool>)
+ native_tool_calling: Enable OpenAI-style function calling format
+ execute_tools: Whether to automatically execute detected tool calls
+ execute_on_stream: For streaming, execute tools as they appear vs. at the end
+ tool_execution_strategy: How to execute multiple tools ("sequential" or "parallel")
+ xml_adding_strategy: How to add XML tool results to the conversation
+ max_xml_tool_calls: Maximum number of XML tool calls to process (0 = no limit)
+ """
+
+ xml_tool_calling: bool = True
+ native_tool_calling: bool = False
+
+ execute_tools: bool = True
+ execute_on_stream: bool = False
+ tool_execution_strategy: ToolExecutionStrategy = "sequential"
+ xml_adding_strategy: XmlAddingStrategy = "assistant_message"
+ max_xml_tool_calls: int = 0 # 0 means no limit
+
+ def __post_init__(self):
+ """Validate configuration after initialization."""
+ if self.xml_tool_calling is False and self.native_tool_calling is False and self.execute_tools:
+ raise ValueError(
+ "At least one tool calling format (XML or native) must be enabled if execute_tools is True")
+
+ if self.xml_adding_strategy not in ["user_message", "assistant_message", "inline_edit"]:
+ raise ValueError("xml_adding_strategy must be 'user_message', 'assistant_message', or 'inline_edit'")
+
+ if self.max_xml_tool_calls < 0:
+ raise ValueError("max_xml_tool_calls must be a non-negative integer (0 = no limit)")
+
+
+
  class StreamTaskResponser(TaskResponseProcessor):
  def __init__(self, response_context: TaskResponseContext):
  super().__init__(response_context)
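Note: ProcessorConfig moved from xga_responser_base.py into this module unchanged, and its __post_init__ rejects inconsistent settings at construction time. A quick illustration of the validation, assuming xgae 0.1.5 is installed and the import path introduced by this diff:

    from xgae.engine.responser.xga_stream_responser import ProcessorConfig

    ProcessorConfig()  # defaults pass: XML tool calling on, sequential, no call limit

    try:
        # Disabling both formats while execute_tools stays True raises ValueError.
        ProcessorConfig(xml_tool_calling=False, native_tool_calling=False)
    except ValueError as e:
        print(e)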
{xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/xga_base.py
@@ -7,23 +7,22 @@ class XGAError(Exception):
  pass


- class XGAContextMsg(TypedDict, total=False):
+ class XGAResponseMsg(TypedDict, total=False):
  type: Literal["user", "status", "tool", "assistant", "assistant_response_end"]
  content: Union[Dict[str, Any], List[Any], str]
  is_llm_message: bool
  metadata: Dict[str, Any]
  message_id: str
- session_id: str
- agent_id: str
  task_id: str
  task_run_id: str
  trace_id: str
+ session_id: Optional[str]
+ agent_id: Optional[str]

- class XGAResponseMsg(TypedDict, total=False):
- type: Literal["content", "status"]
- content: Union[Dict[str, Any], List[Any], str]
- status: Literal["error", "status"]
- message: str
+ class XGATaskResult(TypedDict, total=False):
+ type: Literal["ask", "answer", "error"]
+ content: str
+ attachments: Optional[List[str]]

  @dataclass
  class XGAToolSchema:
@@ -39,6 +38,7 @@ class XGAToolResult:
  success: bool
  output: str

+
  class XGAToolBox(ABC):
  @abstractmethod
  async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
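Note: XGATaskResult is the new return shape for run_task_with_final_answer, replacing the old status-style XGAResponseMsg. Because it is a TypedDict with total=False, attachments may be omitted. A self-contained sketch of the three result shapes (the class is redeclared locally so the sketch runs standalone):

    from typing import List, Literal, Optional, TypedDict

    # Mirrors the XGATaskResult definition in xga_base.py above.
    class XGATaskResult(TypedDict, total=False):
        type: Literal["ask", "answer", "error"]
        content: str
        attachments: Optional[List[str]]

    answer = XGATaskResult(type="answer", content="2", attachments=None)
    ask = XGATaskResult(type="ask", content="Which host do you mean?")
    error = XGATaskResult(type="error", content="Parse final result failed!")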
{xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/xga_engine.py
@@ -6,7 +6,7 @@ from typing import List, Any, Dict, Optional, AsyncGenerator, cast, Union, Liter
  from uuid import uuid4

  from xgae.engine.responser.xga_responser_base import TaskResponseContext, TaskResponseProcessor, TaskRunContinuousState
- from xgae.engine.xga_base import XGAContextMsg, XGAToolBox, XGAResponseMsg
+ from xgae.engine.xga_base import XGAResponseMsg, XGAToolBox, XGATaskResult
  from xgae.utils.llm_client import LLMClient, LLMConfig
  from xgae.utils.setup_env import langfuse
  from xgae.utils.utils import handle_error
@@ -23,9 +23,9 @@ class XGATaskEngine:
  llm_config: Optional[LLMConfig] = None,
  prompt_builder: Optional[XGAPromptBuilder] = None,
  tool_box: Optional[XGAToolBox] = None):
- self.session_id = session_id if session_id else f"xga_sid_{uuid4()}"
  self.task_id = task_id if task_id else f"xga_task_{uuid4()}"
  self.agent_id = agent_id
+ self.session_id = session_id

  self.llm_client = LLMClient(llm_config)
  self.model_name = self.llm_client.model_name
@@ -34,12 +34,12 @@ class XGATaskEngine:
  self.prompt_builder = prompt_builder or XGAPromptBuilder(system_prompt)
  self.tool_box = tool_box or XGAMcpToolBox()

- self.task_context_msgs: List[XGAContextMsg] = []
+ self.task_response_msgs: List[XGAResponseMsg] = []
  self.task_no = -1
  self.task_run_id = f"{self.task_id}[{self.task_no}]"
  self.trace_id = None

- async def __async_init__(self, general_tools:List[str], custom_tools: List[str]) -> None:
+ async def _post_init_(self, general_tools:List[str], custom_tools: List[str]) -> None:
  await self.tool_box.load_mcp_tools_schema()
  await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
  general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
@@ -66,9 +66,9 @@ class XGATaskEngine:
  prompt_builder=prompt_builder,
  tool_box=tool_box)

- general_tools = general_tools or ["complete"]
+ general_tools = general_tools or ["complete", "ask"]
  custom_tools = custom_tools or []
- await engine.__async_init__(general_tools, custom_tools)
+ await engine._post_init_(general_tools, custom_tools)

  logging.info("*"*30 + f" XGATaskEngine Task'{engine.task_id}' Initialized " + "*"*30)
  logging.info(f"model_name={engine.model_name}, is_stream={engine.is_stream}, trace_id={engine.trace_id}")
@@ -76,6 +76,16 @@ class XGATaskEngine:

  return engine

+ async def run_task_with_final_answer(self,
+ task_message: Dict[str, Any],
+ max_auto_run: int = 25,
+ trace_id: Optional[str] = None) -> XGATaskResult:
+ chunks = []
+ async for chunk in self.run_task(task_message=task_message, max_auto_run=max_auto_run, trace_id=trace_id):
+ chunks.append(chunk)
+
+ final_result = self._parse_final_result(chunks)
+ return final_result

  async def run_task(self,
  task_message: Dict[str, Any],
@@ -87,7 +97,7 @@ class XGATaskEngine:
  self.task_no += 1
  self.task_run_id = f"{self.task_id}[{self.task_no}]"

- self.add_context_msg(type="user", content=task_message, is_llm_message=True)
+ self.add_response_msg(type="user", content=task_message, is_llm_message=True)

  if max_auto_run <= 1:
  continuous_state:TaskRunContinuousState = {
@@ -105,7 +115,7 @@

  async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
  llm_messages = [{"role": "system", "content": self.task_prompt}]
- cxt_llm_contents = self._get_context_llm_contents()
+ cxt_llm_contents = self._get_response_llm_contents()
  llm_messages.extend(cxt_llm_contents)

  partial_content = continuous_state.get('accumulated_content', '')
@@ -120,10 +130,10 @@
  response_processor = self._create_response_processer()

  async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
+ self._reponse_chunk_log(chunk)
  yield chunk

-
- async def _run_task_auto(self, max_auto_run: int) -> AsyncGenerator:
+ async def _run_task_auto(self, max_auto_run: int) -> AsyncGenerator[Dict[str, Any], None]:
  continuous_state: TaskRunContinuousState = {
  "accumulated_content": "",
  "auto_continue_count": 0,
@@ -141,61 +151,101 @@

  try:
  async for chunk in self._run_task_once(continuous_state):
+ yield chunk
  try:
  if chunk.get("type") == "status":
  content = json.loads(chunk.get('content', '{}'))
  status_type = content.get('status_type', None)
  if status_type == "error":
  logging.error(f"run_task_auto: task_response error: {chunk.get('message', 'Unknown error')}")
- yield chunk
- return
+ auto_continue = False
+ break
  elif status_type == 'finish':
  finish_reason = content.get('finish_reason', None)
- if finish_reason == 'stop' :
- auto_continue = True
- auto_continue_count += 1
- update_continuous_state(auto_continue_count, auto_continue)
- logging.info(f"run_task_auto: Detected finish_reason='stop', auto-continuing ({auto_continue_count}/{max_auto_run})")
- continue
+ if finish_reason == 'completed':
+ logging.warning(f"run_task_auto: Detected finish_reason='completed', Task Completed Success !")
+ auto_continue = False
+ break
  elif finish_reason == 'xml_tool_limit_reached':
- logging.info(f"run_task_auto: Detected finish_reason='xml_tool_limit_reached', stopping auto-continue")
+ logging.warning(f"run_task_auto: Detected finish_reason='xml_tool_limit_reached', stopping auto-continue")
  auto_continue = False
- update_continuous_state(auto_continue_count, auto_continue)
- elif finish_reason == 'length':
+ break
+ elif finish_reason == 'stop' or finish_reason == 'length': # 'length' never occur
  auto_continue = True
  auto_continue_count += 1
  update_continuous_state(auto_continue_count, auto_continue)
- logging.info(f"run_task_auto: Detected finish_reason='length', auto-continuing ({auto_continue_count}/{max_auto_run})")
- continue
+ logging.info(f"run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{max_auto_run})")
+ except StopAsyncIteration:
+ pass
  except Exception as parse_error:
  logging.error(f"run_task_auto: Error in parse chunk: {str(parse_error)}")
- yield {
- "type": "status",
- "status": "error",
- "message": f"Error in parse chunk: {str(parse_error)}"
- }
- return
-
- # Otherwise just yield the chunk normally
- yield chunk
-
- # If not auto-continuing, we're done
- if not auto_continue:
- break
+ content = {"role": "system", "status_type": "error", "message": "Parse response chunk Error"}
+ error_msg = self.add_response_msg(type="status", content=content, is_llm_message=False)
+ yield error_msg
  except Exception as run_error:
  logging.error(f"run_task_auto: Call task_run_once error: {str(run_error)}")
- yield {
- "type": "status",
- "status": "error",
- "message": f"Call task_run_once error: {str(run_error)}"
- }
- return
+ content = {"role": "system", "status_type": "error", "message": "Call task_run_once error"}
+ error_msg = self.add_response_msg(type="status", content=content, is_llm_message=False)
+ yield error_msg

- def add_context_msg(self, type: Literal["user", "status", "tool", "assistant", "assistant_response_end"],
- content: Union[Dict[str, Any], List[Any], str],
- is_llm_message: bool,
- metadata: Optional[Dict[str, Any]]=None)-> XGAContextMsg:
- message = XGAContextMsg(
+ def _parse_final_result(self, chunks: List[Dict[str, Any]]) -> XGATaskResult:
+ final_result: XGATaskResult = None
+ try:
+ finish_reason = ''
+ for chunk in reversed(chunks):
+ chunk_type = chunk.get("type")
+ if chunk_type == "status":
+ status_content = json.loads(chunk.get('content', '{}'))
+ status_type = status_content.get('status_type', None)
+ if status_type == "error":
+ error = status_content.get('message', 'Unknown error')
+ final_result = XGATaskResult(type="error", content=error)
+ break
+ elif status_type == "finish":
+ finish_reason = status_content.get('finish_reason', None)
+ if finish_reason == 'xml_tool_limit_reached':
+ error = "Completed due to over task max_auto_run limit !"
+ final_result = XGATaskResult(type="error", content=error)
+ break
+ continue
+ elif chunk_type == "tool" and finish_reason in ['completed', 'stop']:
+ tool_content = json.loads(chunk.get('content', '{}'))
+ tool_execution = tool_content.get('tool_execution')
+ tool_name = tool_execution.get('function_name')
+ if tool_name == "complete":
+ result_content = tool_execution["arguments"].get("text", "Task completed with no answer")
+ attachments = tool_execution["arguments"].get("attachments", None)
+ final_result = XGATaskResult(type="answer", content=result_content, attachments=attachments)
+ elif tool_name == "ask":
+ result_content = tool_execution["arguments"].get("text", "Task ask for more info")
+ attachments = tool_execution["arguments"].get("attachments", None)
+ final_result = XGATaskResult(type="ask", content=result_content, attachments=attachments)
+ else:
+ tool_result = tool_execution.get("result", None)
+ if tool_result is not None:
+ success = tool_result.get("success")
+ output = tool_result.get("output")
+ result_type = "answer" if success else "error"
+ result_content = f"Task execute '{tool_name}' {result_type}: {output}"
+ final_result = XGATaskResult(type=result_type, content=result_content)
+ elif chunk_type == "assistant" and finish_reason == 'stop':
+ assis_content = chunk.get('content', '{}')
+ result_content = assis_content.get("content", "LLM output is empty")
+ final_result = XGATaskResult(type="answer", content=result_content)
+ if final_result is not None:
+ break
+ except Exception as e:
+ logging.error(f"parse_final_result: Final result pass error: {str(e)}")
+ final_result = XGATaskResult(type="error", content="Parse final result failed!")
+ handle_error(e)
+
+ return final_result
+
+ def add_response_msg(self, type: Literal["user", "status", "tool", "assistant", "assistant_response_end"],
+ content: Union[Dict[str, Any], List[Any], str],
+ is_llm_message: bool,
+ metadata: Optional[Dict[str, Any]]=None)-> XGAResponseMsg:
+ message = XGAResponseMsg(
  message_id = f"xga_msg_{uuid4()}",
  type = type,
  content = content,
@@ -207,13 +257,13 @@ class XGATaskEngine:
  task_run_id = self.task_run_id,
  trace_id = self.trace_id
  )
- self.task_context_msgs.append(message)
+ self.task_response_msgs.append(message)

  return message

- def _get_context_llm_contents (self) -> List[Dict[str, Any]]:
+ def _get_response_llm_contents (self) -> List[Dict[str, Any]]:
  llm_messages = []
- for message in self.task_context_msgs:
+ for message in self.task_response_msgs:
  if message["is_llm_message"]:
  llm_messages.append(message)

@@ -251,13 +301,30 @@ class XGATaskEngine:
  "trace_id": self.trace_id,
  "model_name": self.model_name,
  "max_xml_tool_calls": 0,
- "add_context_msg": self.add_response_msg,
+ "add_response_msg_func": self.add_response_msg,
  "tool_box": self.tool_box,
  "tool_execution_strategy": "parallel",
  "xml_adding_strategy": "user_message",
  }
  return response_context

+ def _reponse_chunk_log(self, chunk):
+ chunk_type = chunk.get('type')
+ prefix = ""
+
+ if chunk_type == 'status':
+ content = json.loads(chunk.get('content', '{}'))
+ status_type = content.get('status_type', "empty")
+ prefix = "-" + status_type
+ elif chunk_type == 'tool':
+ tool_content = json.loads(chunk.get('content', '{}'))
+ tool_execution = tool_content.get('tool_execution')
+ tool_name = tool_execution.get('function_name')
+ prefix = "-" + tool_name
+
+ logging.info(f"TASK_RESP_CHUNK[{chunk_type}{prefix}]: {chunk}")
+
+
  if __name__ == "__main__":
  import asyncio
  from xgae.utils.utils import read_file
@@ -269,10 +336,12 @@ if __name__ == "__main__":
  custom_tools=["bomc_fault.*"],
  llm_config=LLMConfig(stream=False),
  system_prompt=system_prompt)
- #engine = await XGATaskEngine.create()
-
- async for chunk in engine.run_task(task_message={"role": "user", "content": "定位10.0.0.1的故障"},
- max_auto_run=8):
- print(chunk)
-
+ # engine = await XGATaskEngine.create(llm_config=LLMConfig(stream=False))
+ #chunks = []
+ # async for chunk in engine.run_task(task_message={"role": "user", "content": "定位10.0.0.1的故障"},max_auto_run=8):
+ # print(chunk)
+ #final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "1+1"}, max_auto_run=2)
+
+ final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "定位10.0.1.1故障"},max_auto_run=8)
+ print("FINAL RESULT:", final_result)
  asyncio.run(main())
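Note: run_task_with_final_answer drains the run_task chunk stream and reduces it to a single XGATaskResult, so callers that do not need streaming can await one value. A minimal driver sketch mirroring the __main__ block in this diff, assuming xgae 0.1.5 is installed and a working LLM environment; the task content is a placeholder:

    import asyncio
    from xgae.engine.xga_engine import XGATaskEngine
    from xgae.utils.llm_client import LLMConfig

    async def main():
        # create() is the async factory shown above; defaults load the MCP tool box.
        engine = await XGATaskEngine.create(llm_config=LLMConfig(stream=False))
        result = await engine.run_task_with_final_answer(
            task_message={"role": "user", "content": "1+1"},
            max_auto_run=2)
        # result is an XGATaskResult: type is "answer", "ask", or "error"
        print(result["type"], result["content"])

    asyncio.run(main())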
{xgae-0.1.4 → xgae-0.1.5}/src/xgae/engine/xga_mcp_tool_box.py
@@ -112,10 +112,7 @@ class XGAMcpToolBox(XGAToolBox):
  if mcp_tool:
  tool_args = args or {}
  if server_name == self.GENERAL_MCP_SERVER_NAME:
- pass
- #tool_args["task_id"] = task_id #xga general tool, first param must be task_id
- else:
- tool_args = args
+ tool_args = dict({"task_id": task_id}, **tool_args)

  try:
  tool_result = await mcp_tool.arun(tool_args)
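Note: the rewritten branch now actually injects task_id as the first argument for general tools instead of passing args through untouched, and the dict(mapping, **kwargs) form lets an explicit task_id in tool_args win over the injected one. A two-line illustration of that merge (values are placeholders):

    tool_args = {"host": "10.0.0.1"}
    merged = dict({"task_id": "xga_task_42"}, **tool_args)
    print(merged)  # {'task_id': 'xga_task_42', 'host': '10.0.0.1'}

    # Keyword arguments override the first mapping, so a caller-supplied
    # task_id takes precedence over the injected placeholder value.
    override = dict({"task_id": "xga_task_42"}, **{"task_id": "explicit"})
    print(override)  # {'task_id': 'explicit'}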