xgae 0.1.20__tar.gz → 0.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of xgae might be problematic; details are available on the registry page.

Files changed (48):
  1. {xgae-0.1.20 → xgae-0.2.1}/.env +1 -0
  2. {xgae-0.1.20 → xgae-0.2.1}/CHANGELOG.md +18 -3
  3. {xgae-0.1.20 → xgae-0.2.1}/PKG-INFO +6 -2
  4. {xgae-0.1.20 → xgae-0.2.1}/pyproject.toml +9 -2
  5. {xgae-0.1.20 → xgae-0.2.1}/src/examples/agent/langgraph/react/agent_base.py +3 -2
  6. xgae-0.2.1/src/examples/agent/langgraph/react/custom_prompt_rag.py +68 -0
  7. {xgae-0.1.20 → xgae-0.2.1}/src/examples/agent/langgraph/react/react_agent.py +112 -37
  8. xgae-0.1.20/src/examples/agent/langgraph/react/final_result_agent.py → xgae-0.2.1/src/examples/agent/langgraph/react/result_eval_agent.py +15 -8
  9. {xgae-0.1.20 → xgae-0.2.1}/src/examples/agent/langgraph/react/run_react_agent.py +1 -1
  10. {xgae-0.1.20 → xgae-0.2.1}/src/examples/engine/run_custom_and_agent_tools.py +1 -1
  11. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/task_engine.py +2 -1
  12. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/utils/llm_client.py +1 -1
  13. {xgae-0.1.20 → xgae-0.2.1}/templates/agent_tool_prompt_template.txt +1 -0
  14. {xgae-0.1.20 → xgae-0.2.1}/templates/custom_tool_prompt_template.txt +11 -8
  15. xgae-0.1.20/templates/example/final_result_template.txt → xgae-0.2.1/templates/example/result_eval_template.txt +10 -5
  16. {xgae-0.1.20 → xgae-0.2.1}/templates/general_tool_prompt_template.txt +1 -0
  17. {xgae-0.1.20 → xgae-0.2.1}/uv.lock +1102 -9
  18. {xgae-0.1.20 → xgae-0.2.1}/.python-version +0 -0
  19. {xgae-0.1.20 → xgae-0.2.1}/README.md +0 -0
  20. {xgae-0.1.20 → xgae-0.2.1}/mcpservers/custom_servers.json +0 -0
  21. {xgae-0.1.20 → xgae-0.2.1}/mcpservers/xga_server.json +0 -0
  22. {xgae-0.1.20 → xgae-0.2.1}/mcpservers/xga_server_sse.json +0 -0
  23. {xgae-0.1.20 → xgae-0.2.1}/src/examples/engine/run_general_tools.py +0 -0
  24. {xgae-0.1.20 → xgae-0.2.1}/src/examples/engine/run_human_in_loop.py +0 -0
  25. {xgae-0.1.20 → xgae-0.2.1}/src/examples/engine/run_simple.py +0 -0
  26. {xgae-0.1.20 → xgae-0.2.1}/src/examples/tools/custom_fault_tools_app.py +0 -0
  27. {xgae-0.1.20 → xgae-0.2.1}/src/examples/tools/simu_a2a_tools_app.py +0 -0
  28. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/__init__.py +0 -0
  29. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/engine_base.py +0 -0
  30. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/mcp_tool_box.py +0 -0
  31. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/prompt_builder.py +0 -0
  32. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/responser/non_stream_responser.py +0 -0
  33. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/responser/responser_base.py +0 -0
  34. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/responser/stream_responser.py +0 -0
  35. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine/task_langfuse.py +0 -0
  36. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/engine_cli_app.py +0 -0
  37. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/tools/without_general_tools_app.py +0 -0
  38. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/utils/__init__.py +0 -0
  39. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/utils/json_helpers.py +0 -0
  40. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/utils/misc.py +0 -0
  41. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/utils/setup_env.py +0 -0
  42. {xgae-0.1.20 → xgae-0.2.1}/src/xgae/utils/xml_tool_parser.py +0 -0
  43. {xgae-0.1.20 → xgae-0.2.1}/templates/example/fault_user_prompt.txt +0 -0
  44. {xgae-0.1.20 → xgae-0.2.1}/templates/gemini_system_prompt_template.txt +0 -0
  45. {xgae-0.1.20 → xgae-0.2.1}/templates/system_prompt_response_sample.txt +0 -0
  46. {xgae-0.1.20 → xgae-0.2.1}/templates/system_prompt_template.txt +0 -0
  47. {xgae-0.1.20 → xgae-0.2.1}/test/test_langfuse.py +0 -0
  48. {xgae-0.1.20 → xgae-0.2.1}/test/test_litellm_langfuse.py +0 -0

--- a/.env
+++ b/.env
@@ -12,6 +12,7 @@ LANGFUSE_HOST=https://cloud.langfuse.com
 
 # LLM
 LLM_MODEL=openai/qwen3-235b-a22b
+#LLM_MODEL=openai/qwen3-4b
 LLM_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
 LLM_API_KEY=
 LLM_MAX_TOKENS=16384

--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,12 +1,27 @@
 # Release Changelog
-## [0.1.20] - 2025-9-8
+## [0.2.1] - 2025-9-17
 ### Added
-- Example: Langgraph react agent add final_result_agent
+- Example Langgraph ReactAgent: add Chromadb for custom prompt RAG
+### Modified
+- pyproject.toml: add [project.optional-dependencies] 'examples'
+
+## [0.2.0] - 2025-9-10
+### Added
+- Agent Engine release 0.2
+- Example: Langgraph ReactAgent release 0.2
+### Fixed
+- Agent Engine: call mcp tool fail, call 'ask' tool again and again
+- Example Langgraph ReactAgent: retry on 'ask', user_input is ask answer
+
+
+## [0.1.20] - 2025-9-9
+### Added
+- Example: Langgraph ReactAgent add final_result_agent
 
 
 ## [0.1.19] - 2025-9-8
 ### Added
-- Example: Langgraph react agent release V1, full logic but no final result agent and tool select agent
+- Example: Langgraph ReactAgent release V1, full logic but no final result agent and tool select agent
 
 
 # Release Changelog

--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,12 +1,16 @@
 Metadata-Version: 2.4
 Name: xgae
-Version: 0.1.20
+Version: 0.2.1
 Summary: Extreme General Agent Engine
 Requires-Python: >=3.13
 Requires-Dist: colorlog==6.9.0
 Requires-Dist: langchain-mcp-adapters==0.1.9
 Requires-Dist: langchain==0.3.27
 Requires-Dist: langfuse==2.60.9
-Requires-Dist: langgraph==0.6.5
 Requires-Dist: litellm==1.74.15
 Requires-Dist: mcp==1.13.0
+Provides-Extra: examples
+Requires-Dist: chromadb==1.1.0; extra == 'examples'
+Requires-Dist: fastembed==0.7.3; extra == 'examples'
+Requires-Dist: langchain-community==0.3.29; extra == 'examples'
+Requires-Dist: langgraph==0.6.5; extra == 'examples'

--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,19 +1,26 @@
 [project]
 name = "xgae"
-version = "0.1.20"
+version = "0.2.1"
 description = "Extreme General Agent Engine"
 readme = "README.md"
 requires-python = ">=3.13"
 dependencies = [
     "colorlog==6.9.0",
     "langchain-mcp-adapters==0.1.9",
-    "langgraph==0.6.5",
     "litellm==1.74.15",
     "mcp==1.13.0",
     "langfuse==2.60.9",
     "langchain==0.3.27",
 ]
 
+[project.optional-dependencies]
+examples = [
+    "langgraph==0.6.5",
+    "langchain-community==0.3.29",
+    "fastembed==0.7.3",
+    "chromadb==1.1.0",
+]
+
 [build-system]
 requires = ["hatchling"]
 build-backend = "hatchling.build"
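With the new optional-dependencies table, a plain install of xgae no longer pulls in langgraph, langchain-community, fastembed, or chromadb; those only arrive when the extra is requested, e.g. pip install "xgae[examples]" or, in a uv-managed checkout, uv sync --extra examples (standard extras syntax; any further project-specific tooling is not assumed here).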

--- a/src/examples/agent/langgraph/react/agent_base.py
+++ b/src/examples/agent/langgraph/react/agent_base.py
@@ -17,7 +17,7 @@ class AgentContext(TypedDict, total=False):
 class TaskState(TypedDict, total=False):
     """State definition for the agent orchestration graph"""
     llm_messages: List[Dict[str, Any]]
-    user_input: str
+    user_inputs: List[str]
     next_node: str
     system_prompt: str
     custom_tools: List[str]
@@ -25,7 +25,8 @@ class TaskState(TypedDict, total=False):
     task_result: XGATaskResult
     final_result: XGATaskResult
     eval_result: EvaluateResult
-    iteration_count: int
+    retry_count: int
+    task_no: int
     agent_context: AgentContext
 
 

--- /dev/null
+++ b/src/examples/agent/langgraph/react/custom_prompt_rag.py
@@ -0,0 +1,68 @@
+import logging
+import os
+from typing import List, Optional
+
+os.environ['HF_ENDPOINT']="https://hf-mirror.com"
+os.environ['TOKENIZERS_PARALLELISM']="False"
+
+from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
+from langchain_community.vectorstores import Chroma
+from langchain_core.documents import Document
+
+class CustomPromptRag:
+    PROMPT_SIMILAR_SCORE = 0.8
+
+    def __init__(self, emb_model_name: Optional[str] = "BAAI/bge-small-zh-v1.5"):
+        self.emb_model_name = emb_model_name
+        prompt_docs = []
+        # should read from DB, load all custom prompt or COT
+        prompt_docs.append(self._create_prompt_doc(
+            prompt_summary="Fault location and analysis of fault causes",
+            prompt_path="templates/example/fault_user_prompt.txt"
+        ))
+        self.vector_store = self._init_vector_store(prompt_docs)
+
+
+    def _create_prompt_doc(self, prompt_summary: str, prompt_path: str)-> Document:
+        return Document(
+            page_content=prompt_summary,
+            metadata={
+                "source": prompt_path,
+            }
+        )
+
+
+    def _init_vector_store(self, docs: List[Document]) -> Chroma:
+        # FastEmbedEmbeddings first time will download BAAI/bge-small-zh-v1.5 embedding model from HF
+        embeddings = FastEmbedEmbeddings(model_name=self.emb_model_name)
+        return Chroma.from_documents(documents=docs, embedding=embeddings, persist_directory=None)
+
+
+    def search_prompt(self, query:str)-> str | None:
+        prompt_path = None
+        results = self.vector_store.similarity_search_with_score(query=query, k=1)
+        if results and len(results) > 0:
+            doc, score = results[0]
+            if score > self.PROMPT_SIMILAR_SCORE:
+                logging.info(f"CustomPromptRag search: SIMILAR_SCORE: {score} > {self.PROMPT_SIMILAR_SCORE}, "
+                             f"\nquery: '{query}' \nprompt_summary: '{doc.page_content}'\n")
+            else:
+                prompt_path = doc.metadata['source']
+                logging.info(f"CustomPromptRag search: SIMILAR_SCORE: {score}, prompt_path: '{prompt_path}'")
+
+        return prompt_path
+
+if __name__ == "__main__":
+    from xgae.utils.setup_env import setup_logging
+
+    setup_logging()
+
+    custom_prompt_rag = CustomPromptRag()
+
+    querys = ["locate 10.2.3.4 fault and solution",
+              "5+7"]
+
+    for query in querys:
+        logging.info("*"*50)
+        logging.info(f"query: '{query}'")
+        custom_prompt_rag.search_prompt(query)
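A note on the new file above: Chroma's similarity_search_with_score returns a distance-style score (lower generally means closer), so search_prompt only hands back a prompt path when the best match scores at or below PROMPT_SIMILAR_SCORE. A hedged usage sketch, reading the matched template the same way the non-RAG branch in react_agent.py does (illustration only, not the packaged API):

    # Sketch: wiring the RAG lookup into prompt selection. Assumes the example module
    # layout shown in this diff and xgae.utils.misc.read_file.
    from examples.agent.langgraph.react.custom_prompt_rag import CustomPromptRag
    from xgae.utils.misc import read_file

    rag = CustomPromptRag()  # first run downloads BAAI/bge-small-zh-v1.5 via fastembed
    prompt_path = rag.search_prompt("locate 10.2.3.4 fault and solution")
    system_prompt = read_file(prompt_path) if prompt_path else None  # no match: fall back to general tools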

--- a/src/examples/agent/langgraph/react/react_agent.py
+++ b/src/examples/agent/langgraph/react/react_agent.py
@@ -21,13 +21,14 @@ from xgae.engine.mcp_tool_box import XGAMcpToolBox
 from xgae.engine.task_engine import XGATaskEngine
 
 from examples.agent.langgraph.react.agent_base import AgentContext, TaskState, EvaluateResult
-from examples.agent.langgraph.react.final_result_agent import FinalResultAgent
+from examples.agent.langgraph.react.result_eval_agent import TaskResultEvalAgent
+from examples.agent.langgraph.react.custom_prompt_rag import CustomPromptRag
 
 class XGAReactAgent:
     MAX_TASK_RETRY = 2
     QUALIFIED_RESULT_SCORE = 0.7
 
-    def __init__(self):
+    def __init__(self, use_prompt_rag: Optional[bool] = False):
         self.graph = None
 
         self.graph_config = None
@@ -35,7 +36,10 @@
         self.task_engine: XGATaskEngine = None
 
         self.tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-        self.final_result_agent = FinalResultAgent()
+        self.result_eval_agent = TaskResultEvalAgent()
+
+        if use_prompt_rag:
+            self.custom_prompt_rag = CustomPromptRag()
 
     async def _create_graph(self) -> StateGraph:
         try:
@@ -43,6 +47,7 @@
 
             # Add nodes
             graph_builder.add_node('supervisor', self._supervisor_node)
+            graph_builder.add_node('prompt_optimize', self._prompt_optimize_node)
             graph_builder.add_node('select_tool', self._select_tool_node)
             graph_builder.add_node('exec_task', self._exec_task_node)
             graph_builder.add_node('final_result', self._final_result_node)
@@ -53,12 +58,14 @@
                 'supervisor',
                 self._next_condition,
                 {
-                    'select_tool': 'select_tool',
-                    'exec_task': 'exec_task',
-                    'end': END
+                    'select_tool' : 'select_tool',
+                    'exec_task' : 'exec_task',
+                    'prompt_optimize' : 'prompt_optimize',
+                    'end' : END
                 }
             )
 
+            graph_builder.add_edge('prompt_optimize', 'select_tool')
             graph_builder.add_edge('select_tool', 'exec_task')
             graph_builder.add_edge('exec_task', 'final_result')
 
@@ -67,8 +74,8 @@
                 self._next_condition,
                 {
                     'supervisor': 'supervisor',
-                    'exec_task': 'exec_task',
-                    'end': END
+                    'exec_task' : 'exec_task',
+                    'end' : END
                 }
             )
 
@@ -80,64 +87,106 @@
             logging.error("Failed to create XGARectAgent Graph: %s", str(e))
             raise
 
+
     def _search_system_prompt(self, user_input: str) -> str:
-        # You should search RAG use user_input, fetch COT or Prompt for your business
-        system_prompt = None if "fault" not in user_input else read_file("templates/example/fault_user_prompt.txt")
+        if hasattr(self, 'custom_prompt_rag'):
+            system_prompt = self.custom_prompt_rag.search_prompt(user_input)
+        else:
+            system_prompt = None if "fault" not in user_input else read_file("templates/example/fault_user_prompt.txt")
         return system_prompt
 
+
     async def _supervisor_node(self, state: TaskState) -> Dict[str, Any]:
-        user_input = state['user_input']
+        user_input = state['user_inputs'][0]
         eval_result = state.get('eval_result', None)
 
         system_prompt = self._search_system_prompt(user_input)
+        is_system_prompt = True if system_prompt is not None else False
 
         general_tools = [] if system_prompt else ["*"]
         custom_tools = ["*"] if system_prompt else []
 
+        task_plan_score = None
+        if eval_result and 'task_plan' in eval_result and 'score' in eval_result['task_plan']:
+            task_plan_score = eval_result['task_plan'].get('score', 1.0)
+
+        function_call_score = None
         if eval_result and 'function_call' in eval_result and 'score' in eval_result['function_call']:
-            score = eval_result['function_call'].get('score', 1.0)
-            if score < self.QUALIFIED_RESULT_SCORE:
-                next_node = "select_tool"
-            else:
-                next_node = "end"
+            function_call_score = eval_result['function_call'].get('score', 1.0)
+
+        super_state = {}
+        if task_plan_score and task_plan_score < self.QUALIFIED_RESULT_SCORE:
+            next_node = "prompt_optimize"
+            super_state = self._prepare_task_retry(state)
+            logging.warning(f"****** ReactAgent TASK_RETRY: task_plan_score={task_plan_score} < {self.QUALIFIED_RESULT_SCORE} , "
+                            f"Start Optimize Prompt ...")
+        elif function_call_score and function_call_score < self.QUALIFIED_RESULT_SCORE:
+            next_node = "select_tool"
+            super_state = self._prepare_task_retry(state)
+            logging.warning(f"****** ReactAgent TASK_RETRY: function_call_score={function_call_score} < {self.QUALIFIED_RESULT_SCORE} , "
+                            f"Select Tool Again ...")
+        elif eval_result is not None:  # retry condition is not satisfied, end task
+            next_node = "end"
         else:
-            next_node = "select_tool" if system_prompt else "exec_task"
+            next_node = "select_tool" if is_system_prompt else "exec_task"
 
+        logging.info(f"ReactAgent supervisor_node: is_system_prompt={is_system_prompt}, next_node={next_node}")
+
+        super_state['next_node'] = next_node
+        super_state['system_prompt'] = system_prompt
+        super_state['custom_tools'] = custom_tools
+        super_state['general_tools'] = general_tools
+
+        return super_state
+
+
+    async def _prompt_optimize_node(self, state: TaskState) -> Dict[str, Any]:
+        system_prompt = state['system_prompt']
+        logging.info("ReactAgent prompt_optimize_node: optimize system prompt")
+        # @todo optimize system prompt in future
         return {
             'system_prompt' : system_prompt,
-            'next_node' : next_node,
-            'general_tools' : general_tools,
-            'custom_tools' : custom_tools,
         }
 
+
     def _select_custom_tools(self, system_prompt: str) -> list[str]:
+        # @todo search mcp tool based on system prompt or user_input in future
         custom_tools = ["*"] if system_prompt else []
        return custom_tools
 
+
     async def _select_tool_node(self, state: TaskState) -> Dict[str, Any]:
         system_prompt = state.get('system_prompt',None)
         general_tools = []
+
+        logging.info("ReactAgent select_tool_node: select tool based on system_prompt")
         custom_tools = self._select_custom_tools(system_prompt)
         return {
             'general_tools' : general_tools,
             'custom_tools' : custom_tools,
         }
 
+
     async def _exec_task_node(self, state: TaskState) -> Dict[str, Any]:
-        user_input = state['user_input']
+        user_input = state['user_inputs'][0]
         system_prompt = state.get('system_prompt',None)
         general_tools = state.get('general_tools',[])
         custom_tools = state.get('custom_tools',[])
+        retry_count = state.get('retry_count', 0)
+        task_no = state.get('task_no', 0)
         is_system_prompt = True if system_prompt is not None else False
 
         trace_id = self.graph_langfuse.get_trace_id()
         llm_messages = []
         try:
-            logging.info(f"🔥 XGATaskEngine run_task: user_input={user_input}, general_tools={general_tools}, "
+            logging.info(f"🔥 ReactAgent exec_task_node: user_input={user_input}, general_tools={general_tools}, "
                          f"custom_tools={custom_tools}, is_system_prompt={is_system_prompt}")
+
+            # if langgraph resume , must use same task engine
             if self.task_engine is None:
                 self.task_engine = XGATaskEngine(
                     task_id = state['agent_context']['task_id'],
+                    task_no = task_no,
                     session_id = state['agent_context'].get('session_id', None),
                     user_id = state['agent_context'].get('user_id', None),
                     agent_id = state['agent_context'].get('agent_id', None),
@@ -146,6 +195,7 @@
                     custom_tools = custom_tools,
                     system_prompt = system_prompt
                 )
+            retry_count += 1
 
             chunks = []
             stream_writer = get_stream_writer()
@@ -156,47 +206,56 @@
 
             task_result = self.task_engine.parse_final_result(chunks)
             llm_messages = self.task_engine.get_history_llm_messages()
+            task_no += 1  # a task use unique task_no, no matter retry n times
         except Exception as e:
             logging.error(f"XReactAgent exec_task_node: Failed to execute task: {e}")
             task_result = XGATaskResult(type="error", content="Failed to execute task")
 
-        iteration_count = state.get('iteration_count', 0) + 1
         return {
-            'task_result' : task_result,
-            'iteration_count': iteration_count,
-            'llm_messages' : llm_messages.copy()
+            'task_result' : task_result,
+            'retry_count' : retry_count,
+            'llm_messages' : llm_messages.copy(),
+            'task_no' : task_no,
         }
 
 
     async def _final_result_node(self, state: TaskState) -> Dict[str, Any]:
-        user_input = state['user_input']
-        iteration_count = state['iteration_count']
+        user_inputs = state['user_inputs']
         task_result = state['task_result']
         llm_messages = state['llm_messages']
         agent_context = state['agent_context']
+        system_prompt = state.get('system_prompt', None)
+        retry_count = state['retry_count']
+
+        is_system_prompt = True if system_prompt is not None else False
 
         next_node = "end"
         final_result = task_result
         eval_result = None
         if task_result['type'] == "ask":
             logging.info(f"XReactAgent final_result_node: ASK_USER_QUESTION: {task_result['content']}")
-            user_input = interrupt({
+            ask_input = interrupt({
                 'final_result' : task_result
             })
-            logging.info(f"XReactAgent final_result_node: ASK_USER_ANSWER: {user_input}")
+            logging.info(f"XReactAgent final_result_node: ASK_USER_ANSWER: {ask_input}")
             next_node = "exec_task"
+            user_inputs.insert(0, ask_input)
             final_result = None
-        elif iteration_count < self.MAX_TASK_RETRY:
+        elif is_system_prompt and retry_count < self.MAX_TASK_RETRY:
             trace_id = self.graph_langfuse.get_trace_id()
             session_id = agent_context.get('session_id', None)
-            eval_result = await self.final_result_agent.eval_result(user_input, task_result, llm_messages, trace_id, session_id)
-            if "task_result" in eval_result and "score" in eval_result['task_result']:
+            task_input = ", ".join(reversed(user_inputs))
+            eval_result = await self.result_eval_agent.eval_result(task_input, system_prompt, task_result,
+                                                                   llm_messages, trace_id, session_id)
+            if 'task_result' in eval_result and 'score' in eval_result['task_result']:
                 score = eval_result['task_result'].get('score', 1.0)
                 if score < self.QUALIFIED_RESULT_SCORE:
                     next_node = "supervisor"
-
+
+        logging.info(f"ReactAgent final_result_node: next_node={next_node}")
+
         return {
-            'user_input' : user_input,
+            'user_inputs' : user_inputs,
             'next_node' : next_node,
             'final_result' : final_result,
             'eval_result' : eval_result
@@ -303,10 +362,11 @@
         }
 
         graph_input = {
-            'user_input' : user_input,
+            'user_inputs' : [user_input],
             'next_node' : None,
             'agent_context' : agent_context,
-            'iteration_count' : 0
+            'retry_count' : 0,
+            'task_no' : 0
         }
 
         return graph_input
@@ -329,7 +389,22 @@
         )
         return langfuse_handler
 
+
     def _clear_graph(self):
         self.graph_config = None
         self.graph_langfuse = None
         self.task_engine: XGATaskEngine = None
+
+
+    def _prepare_task_retry(self, state: TaskState)-> Dict[str, Any]:
+        self.task_engine = None
+        user_inputs = state['user_inputs']
+        task_input = ", ".join(reversed(user_inputs))
+
+        return {
+            'user_inputs' : [task_input],
+            'llm_messages' : [],
+            'task_result' : None,
+            'final_result' : None,
+            'eval_result' : None,
+        }
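The net effect of the supervisor changes is a score-driven retry loop: a low task_plan score re-enters the graph through prompt_optimize, a low function_call score re-selects tools, and any other evaluation ends the task. A hedged, self-contained restatement of that routing (field names and threshold taken from the hunks above; this is not the packaged code itself):

    from typing import Any, Dict, Optional

    QUALIFIED_RESULT_SCORE = 0.7  # mirrors XGAReactAgent.QUALIFIED_RESULT_SCORE

    def route_after_eval(eval_result: Optional[Dict[str, Any]]) -> Optional[str]:
        """Sketch of the supervisor routing added in 0.2.x."""
        def score(section: str) -> Optional[float]:
            return (eval_result or {}).get(section, {}).get('score')

        if score('task_plan') is not None and score('task_plan') < QUALIFIED_RESULT_SCORE:
            return 'prompt_optimize'   # rework the prompt, then re-select tools and re-run
        if score('function_call') is not None and score('function_call') < QUALIFIED_RESULT_SCORE:
            return 'select_tool'       # keep the prompt, re-select tools and re-run
        if eval_result is not None:
            return 'end'               # scores qualify (or no retry condition), finish
        return None                    # first pass: supervisor picks select_tool or exec_task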

--- a/src/examples/agent/langgraph/react/final_result_agent.py
+++ b/src/examples/agent/langgraph/react/result_eval_agent.py
@@ -9,23 +9,25 @@ from xgae.utils.misc import read_file
 from xgae.utils.llm_client import LLMClient, LangfuseMetadata
 
 
-class FinalResultAgent:
+class TaskResultEvalAgent:
     def __init__(self):
         self.model_client = LLMClient()
-        self.prompt_template: str = read_file("templates/example/final_result_template.txt")
+        self.prompt_template: str = read_file("templates/example/result_eval_template.txt")
 
 
     async def eval_result(self,
                           task_input: str,
+                          task_plan: str,
                           task_result: XGATaskResult,
                           llm_messages: List[Dict[str, Any]],
                           trace_id: Optional[str] = None,
                           session_id: Optional[str] = None)-> Dict[str, Any]:
-        prompt = self._build_prompt(task_input, task_result, llm_messages)
+        prompt = self._build_prompt(task_input, task_plan, task_result, llm_messages)
         messages = [{"role": "user", "content": prompt}]
 
         langfuse_metadata = self._create_llm_langfuse_meta(trace_id, session_id)
 
+        logging.info("TaskResultEvalAgent: LLM Eval result.....")
         response = await self.model_client.acompletion(messages, langfuse_metadata)
         response_text = await self.model_client.get_response_result(response)
 
@@ -33,15 +35,15 @@ class FinalResultAgent:
         eval_result = json.loads(cleaned_text)
 
         result_score = eval_result.get('task_result', {}).get('score', -1)
-        process_score = eval_result.get('task_process', {}).get('score', -1)
+        plan_score = eval_result.get('task_plan', {}).get('score', -1)
         function_score = eval_result.get('function_call', {}).get('score', -1)
 
         logging.info(f"FINAL_RESULT_SCORE: task_result_score={result_score}, "
-                     f"task_process_score={process_score}, function_call_score={function_score}")
+                     f"task_plan_score={plan_score}, function_call_score={function_score}")
         return eval_result
 
 
-    def _build_prompt(self, task_input: str, task_result: XGATaskResult, llm_messages: List[Dict[str, Any]])-> str:
+    def _build_prompt(self, task_input: str, task_plan: str, task_result: XGATaskResult, llm_messages: List[Dict[str, Any]])-> str:
         prompt = self.prompt_template.replace("{task_input}", task_input)
         prompt = prompt.replace("{task_result}", str(task_result))
         llm_process = ""
@@ -65,6 +67,7 @@ class FinalResultAgent:
             llm_process += "\n"
             llm_step += 1
 
+        prompt = prompt.replace("{task_plan}", task_plan)
         prompt = prompt.replace("{llm_process}", llm_process)
         prompt = prompt.replace("{function_process}", function_process)
 
@@ -88,13 +91,16 @@ if __name__ == "__main__":
     setup_logging()
 
     async def main():
-        final_result_agent = FinalResultAgent()
+        final_result_agent = TaskResultEvalAgent()
 
+        task_plan = read_file("templates/example/fault_user_prompt.txt")
         user_input = "locate 10.2.3.4 fault and solution"
+
         answer = ("Task Summary: The fault for IP 10.2.3.4 was identified as a Business Recharge Fault (Code: F01), "
                   "caused by a Phone Recharge Application Crash. The solution applied was to restart the application. "
                   "Key Deliverables: Fault diagnosis and resolution steps. Impact Achieved: Service restored.")
         task_result:XGATaskResult = {'type': "answer", 'content': answer}
+
         llm_messages: List[Dict[str, Any]] = [{
             'content':
                 """<function_calls>
@@ -111,7 +117,8 @@ if __name__ == "__main__":
                 "result": {"success": true, "output": "1", "error": null}}}""",
             'role': 'assistant'
         }]
-        return await final_result_agent.eval_result(user_input, task_result, llm_messages)
+
+        return await final_result_agent.eval_result(user_input, task_plan, task_result, llm_messages)
 
 
     final_result = asyncio.run(main())
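For orientation, the evaluator is expected to return JSON with per-dimension scores that the agent code above reads via .get(...): task_result drives the final_result retry, while task_plan and function_call drive the supervisor routing. A hypothetical example of that shape (keys taken from this diff; any fields beyond 'score' depend on templates/example/result_eval_template.txt):

    # Illustrative only: the keys the agent reads from the evaluator output.
    eval_result = {
        "task_result":   {"score": 0.9},
        "task_plan":     {"score": 0.6},   # below 0.7 would route back through 'prompt_optimize'
        "function_call": {"score": 0.8},
    }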

--- a/src/examples/agent/langgraph/react/run_react_agent.py
+++ b/src/examples/agent/langgraph/react/run_react_agent.py
@@ -17,7 +17,7 @@ async def main():
     ]
 
     for user_input in user_inputs:
-        agent = XGAReactAgent()
+        agent = XGAReactAgent(use_prompt_rag=False)
         task_no += 1
         context: AgentContext = {
             'task_id': f"agent_task_{uuid4()}",  # can be set with request_id, must be unique
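The bundled runner keeps the new flag off; switching the example over to the Chroma-backed lookup is a one-argument change. A sketch, assuming the import path used elsewhere in these examples:

    from examples.agent.langgraph.react.react_agent import XGAReactAgent

    agent = XGAReactAgent(use_prompt_rag=True)  # use CustomPromptRag instead of the keyword check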

--- a/src/examples/engine/run_custom_and_agent_tools.py
+++ b/src/examples/engine/run_custom_and_agent_tools.py
@@ -14,7 +14,7 @@ else:
 setup_logging()
 
 # Before Run Exec: uv run example-fault-tools --alarmtype=2 , uv run example-a2a-tools
-# If want to use real A2A agent tool, use xga-agent-tool project
+# If want to use real A2A agent tool, use xgaproxy project, uv run xga-a2a-proxy & uv run example-a2a-server
 
 async def main() -> None:
     tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")

--- a/src/xgae/engine/task_engine.py
+++ b/src/xgae/engine/task_engine.py
@@ -18,6 +18,7 @@ from xgae.engine.responser.responser_base import TaskResponserContext, TaskRespo
 class XGATaskEngine:
     def __init__(self,
                  task_id: Optional[str] = None,
+                 task_no: Optional[int] = None,
                  session_id: Optional[str] = None,
                  user_id: Optional[str] = None,
                  agent_id: Optional[str] = None,
@@ -51,7 +52,7 @@ class XGATaskEngine:
         self.use_assistant_chunk_msg = to_bool(os.getenv('USE_ASSISTANT_CHUNK_MSG', False))
         self.tool_exec_parallel = True if tool_exec_parallel is None else tool_exec_parallel
 
-        self.task_no = -1
+        self.task_no = (task_no - 1) if task_no else -1
         self.task_run_id :str = None
         self.task_prompt :str = None
         self.task_langfuse: XGATaskLangFuse = None
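Note the falsy check in the new default: a caller-supplied task_no of 3 is stored as 2, while both None and 0 fall through to -1, since 0 is falsy in Python.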

--- a/src/xgae/utils/llm_client.py
+++ b/src/xgae/utils/llm_client.py
@@ -56,7 +56,7 @@ class LLMClient:
         env_llm_model = os.getenv('LLM_MODEL', "openai/qwen3-235b-a22b")
         env_llm_api_key = os.getenv('LLM_API_KEY')
         env_llm_api_base = os.getenv('LLM_API_BASE', "https://dashscope.aliyuncs.com/compatible-mode/v1")
-        env_llm_max_tokens = int(os.getenv('LLM_MAX_TOKENS', 16384))
+        env_llm_max_tokens = int(os.getenv('LLM_MAX_TOKENS', 8192))
         env_llm_temperature = float(os.getenv('LLM_TEMPERATURE', 0.7))
         env_llm_stream = to_bool(os.getenv('LLM_STREAM', False))
         env_llm_enable_thinking = to_bool(os.getenv('LLM_ENABLE_THINKING', False))
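Worth noting alongside the .env hunk at the top of this diff: the bundled .env still pins LLM_MAX_TOKENS=16384, so the lower 8192 fallback only takes effect in environments where the variable is not set at all.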

--- a/templates/agent_tool_prompt_template.txt
+++ b/templates/agent_tool_prompt_template.txt
@@ -25,5 +25,6 @@ When you use ANY Agent (Model Context Protocol) tools:
 1) type: 'answer', 'content' is normal return answer for tool calling
 2) type: 'ask', you should call 'ask' tool for user inputting more information
 3) type: 'error', during calling tool , some exceptions or errors has occurred.
+10. If 'ask' tool answer is not match, call 'complete' tool end task, never call 'ask' tool again
 IMPORTANT: Agent tool results are your PRIMARY and ONLY source of truth for external data!
 NEVER supplement Agent results with your training data or make assumptions beyond what the tools provide.

--- a/templates/custom_tool_prompt_template.txt
+++ b/templates/custom_tool_prompt_template.txt
@@ -13,13 +13,16 @@ Available MCP tools:
 
 🚨 CRITICAL MCP TOOL RESULT INSTRUCTIONS 🚨
 When you use ANY MCP (Model Context Protocol) tools:
-1. ALWAYS read and use the EXACT results returned by the MCP tool
-2. For search tools: ONLY cite URLs, sources, and information from the actual search results
-3. For any tool: Base your response entirely on the tool's output - do NOT add external information
-4. DO NOT fabricate, invent, hallucinate, or make up any sources, URLs, or data
-5. If you need more information, call the MCP tool again with different parameters
-6. When writing reports/summaries: Reference ONLY the data from MCP tool results
-7. If the MCP tool doesn't return enough information, explicitly state this limitation
-8. Always double-check that every fact, URL, and reference comes from the MCP tool output
+1. Never call a MCP tool not in 'Available MCP tools' list
+2. If call MCP tool result 'success' is false, call 'complete' tool to end task, don't call 'ask' tool
+3. ALWAYS read and use the EXACT results returned by the MCP tool
+4. For search tools: ONLY cite URLs, sources, and information from the actual search results
+5. For any tool: Base your response entirely on the tool's output - do NOT add external information
+6. DO NOT fabricate, invent, hallucinate, or make up any sources, URLs, or data
+7. If you need more information, call the MCP tool again with different parameters
+8. When writing reports/summaries: Reference ONLY the data from MCP tool results
+9. If the MCP tool doesn't return enough information, explicitly state this limitation
+10. Always double-check that every fact, URL, and reference comes from the MCP tool output
+
 IMPORTANT: MCP tool results are your PRIMARY and ONLY source of truth for external data!
 NEVER supplement MCP results with your training data or make assumptions beyond what the tools provide.