xgae 0.1.18__tar.gz → 0.1.20__tar.gz
This diff shows the content of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- xgae-0.1.18/release.md → xgae-0.1.20/CHANGELOG.md +11 -0
- {xgae-0.1.18 → xgae-0.1.20}/PKG-INFO +2 -1
- {xgae-0.1.18 → xgae-0.1.20}/pyproject.toml +3 -2
- xgae-0.1.20/src/examples/agent/langgraph/react/agent_base.py +31 -0
- xgae-0.1.20/src/examples/agent/langgraph/react/final_result_agent.py +119 -0
- xgae-0.1.20/src/examples/agent/langgraph/react/react_agent.py +335 -0
- xgae-0.1.20/src/examples/agent/langgraph/react/run_react_agent.py +64 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/examples/engine/run_custom_and_agent_tools.py +6 -3
- {xgae-0.1.18 → xgae-0.1.20}/src/examples/engine/run_general_tools.py +1 -1
- {xgae-0.1.18 → xgae-0.1.20}/src/examples/engine/run_human_in_loop.py +2 -2
- {xgae-0.1.18 → xgae-0.1.20}/src/examples/engine/run_simple.py +1 -1
- {xgae-0.1.18 → xgae-0.1.20}/src/examples/tools/simu_a2a_tools_app.py +2 -5
- xgae-0.1.20/src/xgae/__init__.py +4 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/task_engine.py +41 -30
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/task_langfuse.py +21 -7
- xgae-0.1.18/src/xgae/cli_app.py → xgae-0.1.20/src/xgae/engine_cli_app.py +16 -19
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/utils/llm_client.py +27 -5
- xgae-0.1.20/templates/agent_tool_prompt_template.txt +29 -0
- xgae-0.1.20/templates/example/final_result_template.txt +61 -0
- xgae-0.1.20/uv.lock +1463 -0
- xgae-0.1.18/src/examples/agent/langgraph/react/final_result_agent.py +0 -59
- xgae-0.1.18/src/examples/agent/langgraph/react/react_agent.py +0 -209
- xgae-0.1.18/src/xgae/__init__.py +0 -4
- xgae-0.1.18/templates/agent_tool_prompt_template.txt +0 -25
- xgae-0.1.18/templates/example/final_result_template.txt +0 -19
- xgae-0.1.18/uv.lock +0 -1386
- {xgae-0.1.18 → xgae-0.1.20}/.env +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/.python-version +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/README.md +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/mcpservers/custom_servers.json +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/mcpservers/xga_server.json +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/mcpservers/xga_server_sse.json +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/examples/tools/custom_fault_tools_app.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/engine_base.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/mcp_tool_box.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/prompt_builder.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/responser/non_stream_responser.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/responser/responser_base.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/engine/responser/stream_responser.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/tools/without_general_tools_app.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/utils/__init__.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/utils/json_helpers.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/utils/misc.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/utils/setup_env.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/src/xgae/utils/xml_tool_parser.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/templates/custom_tool_prompt_template.txt +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/templates/example/fault_user_prompt.txt +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/templates/gemini_system_prompt_template.txt +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/templates/general_tool_prompt_template.txt +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/templates/system_prompt_response_sample.txt +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/templates/system_prompt_template.txt +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/test/test_langfuse.py +0 -0
- {xgae-0.1.18 → xgae-0.1.20}/test/test_litellm_langfuse.py +0 -0
xgae-0.1.20/CHANGELOG.md (renamed from xgae-0.1.18/release.md)
@@ -1,3 +1,14 @@
+# Release Changelog
+## [0.1.20] - 2025-9-8
+### Added
+- Example: Langgraph react agent add final_result_agent
+
+
+## [0.1.19] - 2025-9-8
+### Added
+- Example: Langgraph react agent release V1, full logic but no final result agent and tool select agent
+
+
 # Release Changelog
 ## [0.1.18] - 2025-9-3
 ### Added
PKG-INFO
@@ -1,10 +1,11 @@
 Metadata-Version: 2.4
 Name: xgae
-Version: 0.1.18
+Version: 0.1.20
 Summary: Extreme General Agent Engine
 Requires-Python: >=3.13
 Requires-Dist: colorlog==6.9.0
 Requires-Dist: langchain-mcp-adapters==0.1.9
+Requires-Dist: langchain==0.3.27
 Requires-Dist: langfuse==2.60.9
 Requires-Dist: langgraph==0.6.5
 Requires-Dist: litellm==1.74.15
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "xgae"
-version = "0.1.18"
+version = "0.1.20"
 description = "Extreme General Agent Engine"
 readme = "README.md"
 requires-python = ">=3.13"
@@ -11,6 +11,7 @@ dependencies = [
     "litellm==1.74.15",
     "mcp==1.13.0",
     "langfuse==2.60.9",
+    "langchain==0.3.27",
 ]

 [build-system]
@@ -21,7 +22,7 @@ build-backend = "hatchling.build"
 exclude = ["log/*", ".idea/*"]

 [project.scripts]
-xgae = "xgae.cli_app:main"
+xgae = "xgae.engine_cli_app:main"
 xgae-tools = "xgae.tools.without_general_tools_app:main"
 example-fault-tools = "examples.tools.custom_fault_tools_app:main"
 example-a2a-tools = "examples.tools.simu_a2a_tools_app:main"
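The [project.scripts] change follows the module rename from src/xgae/cli_app.py to src/xgae/engine_cli_app.py, so the installed xgae command now resolves to xgae.engine_cli_app:main. As a rough sketch (not part of the diff, and assuming only the entry point declared above), invoking the console script is equivalent to:

    # equivalent of running the installed `xgae` console script (sketch)
    from xgae.engine_cli_app import main

    main()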
src/examples/agent/langgraph/react/agent_base.py (new file)
@@ -0,0 +1,31 @@
+from typing import Any, Dict, List, TypedDict, Optional
+from xgae.engine.engine_base import XGATaskResult
+
+class EvaluateResult(TypedDict, total=False):
+    task_result: Dict[str, Any]
+    task_process: Dict[str, Any]
+    function_call: Dict[str, Any]
+
+class AgentContext(TypedDict, total=False):
+    task_id: str
+    session_id: str
+    user_id: str
+    agent_id: str
+    thread_id: str
+
+
+class TaskState(TypedDict, total=False):
+    """State definition for the agent orchestration graph"""
+    llm_messages: List[Dict[str, Any]]
+    user_input: str
+    next_node: str
+    system_prompt: str
+    custom_tools: List[str]
+    general_tools: List[str]
+    task_result: XGATaskResult
+    final_result: XGATaskResult
+    eval_result: EvaluateResult
+    iteration_count: int
+    agent_context: AgentContext
+
+
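TaskState is the LangGraph state schema for the react agent graph defined in react_agent.py below; because it is declared with total=False, the graph can be started from a partial state. A minimal sketch (not part of the diff) of the initial state that _prepare_graph_start builds, using placeholder IDs:

    # illustrative initial state for the graph (placeholder IDs, not from the package)
    context: AgentContext = {'task_id': "agent_task_demo", 'user_id': "agent_user_1", 'agent_id': "agent_1"}
    initial_state: TaskState = {
        'user_input': "locate 10.2.3.4 fault and solution",
        'next_node': None,
        'agent_context': context,
        'iteration_count': 0,
    }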
src/examples/agent/langgraph/react/final_result_agent.py (new file)
@@ -0,0 +1,119 @@
+import json
+import logging
+import re
+
+from typing import Any, Dict, List, Optional
+
+from xgae.engine.engine_base import XGATaskResult
+from xgae.utils.misc import read_file
+from xgae.utils.llm_client import LLMClient, LangfuseMetadata
+
+
+class FinalResultAgent:
+    def __init__(self):
+        self.model_client = LLMClient()
+        self.prompt_template: str = read_file("templates/example/final_result_template.txt")
+
+
+    async def eval_result(self,
+                          task_input: str,
+                          task_result: XGATaskResult,
+                          llm_messages: List[Dict[str, Any]],
+                          trace_id: Optional[str] = None,
+                          session_id: Optional[str] = None)-> Dict[str, Any]:
+        prompt = self._build_prompt(task_input, task_result, llm_messages)
+        messages = [{"role": "user", "content": prompt}]
+
+        langfuse_metadata = self._create_llm_langfuse_meta(trace_id, session_id)
+
+        response = await self.model_client.acompletion(messages, langfuse_metadata)
+        response_text = await self.model_client.get_response_result(response)
+
+        cleaned_text = re.sub(r'^\s*```json|```\s*$', '', response_text, flags=re.MULTILINE).strip()
+        eval_result = json.loads(cleaned_text)
+
+        result_score = eval_result.get('task_result', {}).get('score', -1)
+        process_score = eval_result.get('task_process', {}).get('score', -1)
+        function_score = eval_result.get('function_call', {}).get('score', -1)
+
+        logging.info(f"FINAL_RESULT_SCORE: task_result_score={result_score}, "
+                     f"task_process_score={process_score}, function_call_score={function_score}")
+        return eval_result
+
+
+    def _build_prompt(self, task_input: str, task_result: XGATaskResult, llm_messages: List[Dict[str, Any]])-> str:
+        prompt = self.prompt_template.replace("{task_input}", task_input)
+        prompt = prompt.replace("{task_result}", str(task_result))
+        llm_process = ""
+        function_process = ""
+        llm_step = 1
+        function_step = 1
+        for llm_message in llm_messages:
+            content = llm_message.get('content', '')
+            if "tool_execution" in content:
+                function_process += f"{function_step}. \n"
+                tool_exec = json.loads(content)
+                func_call = tool_exec['tool_execution']
+                func_call.pop('xml_tag_name')
+                clear_content = json.dumps(func_call, indent=2)
+                function_process += clear_content
+                function_process += "\n"
+                function_step += 1
+            else:
+                llm_process += f"{llm_step}. \n"
+                llm_process += content
+                llm_process += "\n"
+                llm_step += 1
+
+        prompt = prompt.replace("{llm_process}", llm_process)
+        prompt = prompt.replace("{function_process}", function_process)
+
+        return prompt
+
+
+    def _create_llm_langfuse_meta(self, trace_id:str, session_id: str)-> LangfuseMetadata:
+        generation_name = "xga_agent_final_result_completion"
+
+        return LangfuseMetadata(
+            generation_name = generation_name,
+            existing_trace_id = trace_id,
+            session_id = session_id
+        )
+
+
+
+if __name__ == "__main__":
+    import asyncio
+    from xgae.utils.setup_env import setup_logging
+    setup_logging()
+
+    async def main():
+        final_result_agent = FinalResultAgent()
+
+        user_input = "locate 10.2.3.4 fault and solution"
+        answer = ("Task Summary: The fault for IP 10.2.3.4 was identified as a Business Recharge Fault (Code: F01), "
+                  "caused by a Phone Recharge Application Crash. The solution applied was to restart the application. "
+                  "Key Deliverables: Fault diagnosis and resolution steps. Impact Achieved: Service restored.")
+        task_result:XGATaskResult = {'type': "answer", 'content': answer}
+        llm_messages: List[Dict[str, Any]] = [{
+            'content':
+                """<function_calls>
+                <invoke name="get_alarm_type">
+                <parameter name="alarm_id">alm0123</parameter>
+                </invoke>
+                </function_calls>'""",
+            'role': "assistant"
+        },{
+            'content': """{"tool_execution": {
+                "function_name": "get_alarm_type",
+                "xml_tag_name": "get-alarm-type",
+                "arguments": {"alarm_id": "alm0123"},
+                "result": {"success": true, "output": "1", "error": null}}}""",
+            'role': 'assistant'
+        }]
+        return await final_result_agent.eval_result(user_input, task_result, llm_messages)
+
+
+    final_result = asyncio.run(main())
+    final_result_json = json.dumps(final_result, ensure_ascii=False, indent=2)
+    print(f"FINAL_RESULT: {final_result_json} ")
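FinalResultAgent.eval_result expects the model to return a JSON object shaped like EvaluateResult from agent_base.py; only the nested score fields are read here, and react_agent.py compares them against QUALIFIED_RESULT_SCORE (0.7). An illustrative payload (values are made up, and any additional keys depend on templates/example/final_result_template.txt):

    # hypothetical evaluation result accepted by eval_result()
    eval_result = {
        "task_result": {"score": 0.85},
        "task_process": {"score": 0.80},
        "function_call": {"score": 0.90},
    }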
src/examples/agent/langgraph/react/react_agent.py (new file)
@@ -0,0 +1,335 @@
+import logging
+import os
+
+from typing import Any, Dict, List, Optional, AsyncGenerator
+from uuid import uuid4
+
+from langfuse.callback import CallbackHandler
+from langfuse import Langfuse
+
+
+from langgraph.graph import END, START, StateGraph
+from langgraph.types import interrupt, Command
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.config import get_stream_writer
+
+from xgae.utils.misc import read_file
+from xgae.utils import log_trace
+
+from xgae.engine.engine_base import XGATaskResult
+from xgae.engine.mcp_tool_box import XGAMcpToolBox
+from xgae.engine.task_engine import XGATaskEngine
+
+from examples.agent.langgraph.react.agent_base import AgentContext, TaskState, EvaluateResult
+from examples.agent.langgraph.react.final_result_agent import FinalResultAgent
+
+class XGAReactAgent:
+    MAX_TASK_RETRY = 2
+    QUALIFIED_RESULT_SCORE = 0.7
+
+    def __init__(self):
+        self.graph = None
+
+        self.graph_config = None
+        self.graph_langfuse = None
+        self.task_engine: XGATaskEngine = None
+
+        self.tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+        self.final_result_agent = FinalResultAgent()
+
+    async def _create_graph(self) -> StateGraph:
+        try:
+            graph_builder = StateGraph(TaskState)
+
+            # Add nodes
+            graph_builder.add_node('supervisor', self._supervisor_node)
+            graph_builder.add_node('select_tool', self._select_tool_node)
+            graph_builder.add_node('exec_task', self._exec_task_node)
+            graph_builder.add_node('final_result', self._final_result_node)
+
+            # Add edges
+            graph_builder.add_edge(START, 'supervisor')
+            graph_builder.add_conditional_edges(
+                'supervisor',
+                self._next_condition,
+                {
+                    'select_tool': 'select_tool',
+                    'exec_task': 'exec_task',
+                    'end': END
+                }
+            )
+
+            graph_builder.add_edge('select_tool', 'exec_task')
+            graph_builder.add_edge('exec_task', 'final_result')
+
+            graph_builder.add_conditional_edges(
+                'final_result',
+                self._next_condition,
+                {
+                    'supervisor': 'supervisor',
+                    'exec_task': 'exec_task',
+                    'end': END
+                }
+            )
+
+            graph = graph_builder.compile(checkpointer=MemorySaver())
+            graph.name = "XGARectAgentGraph"
+
+            return graph
+        except Exception as e:
+            logging.error("Failed to create XGARectAgent Graph: %s", str(e))
+            raise
+
+    def _search_system_prompt(self, user_input: str) -> str:
+        # You should search RAG use user_input, fetch COT or Prompt for your business
+        system_prompt = None if "fault" not in user_input else read_file("templates/example/fault_user_prompt.txt")
+        return system_prompt
+
+    async def _supervisor_node(self, state: TaskState) -> Dict[str, Any]:
+        user_input = state['user_input']
+        eval_result = state.get('eval_result', None)
+
+        system_prompt = self._search_system_prompt(user_input)
+
+        general_tools = [] if system_prompt else ["*"]
+        custom_tools = ["*"] if system_prompt else []
+
+        if eval_result and 'function_call' in eval_result and 'score' in eval_result['function_call']:
+            score = eval_result['function_call'].get('score', 1.0)
+            if score < self.QUALIFIED_RESULT_SCORE:
+                next_node = "select_tool"
+            else:
+                next_node = "end"
+        else:
+            next_node = "select_tool" if system_prompt else "exec_task"
+
+        return {
+            'system_prompt' : system_prompt,
+            'next_node' : next_node,
+            'general_tools' : general_tools,
+            'custom_tools' : custom_tools,
+        }
+
+    def _select_custom_tools(self, system_prompt: str) -> list[str]:
+        custom_tools = ["*"] if system_prompt else []
+        return custom_tools
+
+    async def _select_tool_node(self, state: TaskState) -> Dict[str, Any]:
+        system_prompt = state.get('system_prompt',None)
+        general_tools = []
+        custom_tools = self._select_custom_tools(system_prompt)
+        return {
+            'general_tools' : general_tools,
+            'custom_tools' : custom_tools,
+        }
+
+    async def _exec_task_node(self, state: TaskState) -> Dict[str, Any]:
+        user_input = state['user_input']
+        system_prompt = state.get('system_prompt',None)
+        general_tools = state.get('general_tools',[])
+        custom_tools = state.get('custom_tools',[])
+        is_system_prompt = True if system_prompt is not None else False
+
+        trace_id = self.graph_langfuse.get_trace_id()
+        llm_messages = []
+        try:
+            logging.info(f"🔥 XGATaskEngine run_task: user_input={user_input}, general_tools={general_tools}, "
+                         f"custom_tools={custom_tools}, is_system_prompt={is_system_prompt}")
+            if self.task_engine is None:
+                self.task_engine = XGATaskEngine(
+                    task_id = state['agent_context']['task_id'],
+                    session_id = state['agent_context'].get('session_id', None),
+                    user_id = state['agent_context'].get('user_id', None),
+                    agent_id = state['agent_context'].get('agent_id', None),
+                    tool_box = self.tool_box,
+                    general_tools = general_tools,
+                    custom_tools = custom_tools,
+                    system_prompt = system_prompt
+                )
+
+            chunks = []
+            stream_writer = get_stream_writer()
+            async for chunk in self.task_engine.run_task(task_input={"role": "user", "content": user_input},
+                                                         trace_id=trace_id):
+                chunks.append(chunk)
+                stream_writer({"engine_message": chunk})
+
+            task_result = self.task_engine.parse_final_result(chunks)
+            llm_messages = self.task_engine.get_history_llm_messages()
+        except Exception as e:
+            logging.error(f"XReactAgent exec_task_node: Failed to execute task: {e}")
+            task_result = XGATaskResult(type="error", content="Failed to execute task")
+
+        iteration_count = state.get('iteration_count', 0) + 1
+        return {
+            'task_result' : task_result,
+            'iteration_count': iteration_count,
+            'llm_messages' : llm_messages.copy()
+        }
+
+
+    async def _final_result_node(self, state: TaskState) -> Dict[str, Any]:
+        user_input = state['user_input']
+        iteration_count = state['iteration_count']
+        task_result = state['task_result']
+        llm_messages = state['llm_messages']
+        agent_context = state['agent_context']
+
+        next_node = "end"
+        final_result = task_result
+        eval_result = None
+        if task_result['type'] == "ask":
+            logging.info(f"XReactAgent final_result_node: ASK_USER_QUESTION: {task_result['content']}")
+            user_input = interrupt({
+                'final_result' : task_result
+            })
+            logging.info(f"XReactAgent final_result_node: ASK_USER_ANSWER: {user_input}")
+            next_node = "exec_task"
+            final_result = None
+        elif iteration_count < self.MAX_TASK_RETRY:
+            trace_id = self.graph_langfuse.get_trace_id()
+            session_id = agent_context.get('session_id', None)
+            eval_result = await self.final_result_agent.eval_result(user_input, task_result, llm_messages, trace_id, session_id)
+            if "task_result" in eval_result and "score" in eval_result['task_result']:
+                score = eval_result['task_result'].get('score', 1.0)
+                if score < self.QUALIFIED_RESULT_SCORE:
+                    next_node = "supervisor"
+
+        return {
+            'user_input' : user_input,
+            'next_node' : next_node,
+            'final_result' : final_result,
+            'eval_result' : eval_result
+        }
+
+
+    def _next_condition(self, state: TaskState) -> str:
+        next_node = state['next_node']
+        return next_node
+
+
+    async def generate_with_result(self, user_input: str,
+                                   agent_context: Optional[AgentContext] = None,
+                                   is_resume: Optional[bool]=False) -> XGATaskResult:
+        agent_context = agent_context or {}
+        try:
+
+            if is_resume:
+                logging.info(f"=== Start React Agent for USER_ASK_ANSWER: {user_input}")
+                graph_input = Command(resume=user_input)
+            else:
+                logging.info(f"=== Start React Agent for USER_INPUT: {user_input}")
+                graph_input = await self._prepare_graph_start(user_input, agent_context)
+
+            final_state = await self.graph.ainvoke(graph_input, config=self.graph_config)
+
+            if "__interrupt__" in final_state:
+                interrupt_event = final_state["__interrupt__"][0]
+                interrupt_value = interrupt_event.value
+                result = interrupt_value['final_result']
+            else:
+                result = final_state['final_result']
+
+            return result
+        except Exception as e:
+            log_trace(e, f"XReactAgent generate: user_input={user_input}")
+            result = XGATaskResult(type="error", content=f"React Agent error: {e}")
+            return result
+
+
+    async def generate(self, user_input: str,
+                       agent_context: Optional[AgentContext]=None,
+                       is_resume: Optional[bool]=False) -> AsyncGenerator[Dict[str, Any], None]:
+        agent_context = agent_context or {}
+        try:
+            if is_resume:
+                logging.info(f"=== Start React Stream Agent for USER_ASK_ANSWER: {user_input}")
+                graph_input = Command(resume=user_input)
+            else:
+                logging.info(f"=== Start React Stream Agent USER_ASK_ANSWER: {user_input}")
+                graph_input = await self._prepare_graph_start(user_input, agent_context)
+
+            async for msg_type, message in self.graph.astream(input=graph_input,
+                                                              config=self.graph_config,
+                                                              stream_mode=["custom", "updates"]):
+                if msg_type == "updates" and '__interrupt__' in message:
+                    interrupt_event = message["__interrupt__"][0]
+                    interrupt_value = interrupt_event.value
+                    final_result = interrupt_value['final_result']
+                    yield final_result
+                elif msg_type == "updates" and 'final_result' in message:
+                    message = message['final_result']
+                    final_result = message.get('final_result', None)
+                    if final_result:
+                        yield final_result
+                elif msg_type == "custom" and 'engine_message' in message:
+                    message = {'type': "message", 'content': message['engine_message']}
+                    yield message
+
+        except Exception as e:
+            log_trace(e, f"XReactAgent generate: user_input={user_input}")
+            yield {'type': "error", 'content': f"React Agent generate error: {e}"}
+
+
+    async def _prepare_graph_start(self, user_input, agent_context: AgentContext)->TaskState:
+        if self.graph is None:
+            self.graph = await self._create_graph()
+
+        self._clear_graph()
+
+        agent_context = agent_context or {}
+        task_id = agent_context.get("task_id", f"xga_task_{uuid4()}")
+        agent_context["task_id"] = task_id
+        thread_id = agent_context.get('thread_id', task_id)
+        agent_context['thread_id'] = thread_id
+        session_id = agent_context.get('session_id', task_id)
+        agent_context['session_id'] = session_id
+
+
+        langfuse_handler = self._get_langfuse_handler(agent_context)
+        callbacks = None
+        if langfuse_handler:
+            callbacks = [langfuse_handler]
+            self.graph_langfuse = langfuse_handler.langfuse
+        else:
+            self.graph_langfuse = Langfuse(enabled=False)
+
+        self.graph_config = {
+            'recursion_limit': 100,
+            'configurable': {
+                'thread_id': thread_id
+            },
+            'callbacks': callbacks
+        }
+
+        graph_input = {
+            'user_input' : user_input,
+            'next_node' : None,
+            'agent_context' : agent_context,
+            'iteration_count' : 0
+        }
+
+        return graph_input
+
+
+    def _get_langfuse_handler(self, agent_context: AgentContext)->CallbackHandler:
+        langfuse_handler = None
+        public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
+        secret_key = os.getenv("LANGFUSE_SECRET_KEY")
+        host = os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
+
+        if public_key and secret_key:
+            langfuse_handler = CallbackHandler(
+                public_key = public_key,
+                secret_key = secret_key,
+                host = host,
+                trace_name = "xga_react_agent",
+                session_id = agent_context.get('session_id', None),
+                user_id = agent_context.get('user_id', None),
+            )
+        return langfuse_handler
+
+    def _clear_graph(self):
+        self.graph_config = None
+        self.graph_langfuse = None
+        self.task_engine: XGATaskEngine = None
src/examples/agent/langgraph/react/run_react_agent.py (new file)
@@ -0,0 +1,64 @@
+import asyncio
+
+from uuid import uuid4
+
+from xgae.utils.setup_env import setup_logging
+
+from examples.agent.langgraph.react.react_agent import XGAReactAgent, AgentContext
+
+
+async def main():
+    is_stream = True # two mode agent experience
+    task_no = 0
+    user_inputs = [
+        #"5+5", # For no tool call
+        "locate 10.2.3.4 fault and solution", # For custom tool
+        #"locate fault and solution", # For human append input
+    ]
+
+    for user_input in user_inputs:
+        agent = XGAReactAgent()
+        task_no += 1
+        context: AgentContext = {
+            'task_id': f"agent_task_{uuid4()}", # can be set with request_id, must be unique
+            'user_id': "agent_user_1",
+            'agent_id': "agent_1",
+        }
+
+        is_resume = False
+        auto_continue = True
+        while auto_continue:
+            if is_stream:
+                print(f"*** START AGENT : RUN generate USER_INPUT={user_input}")
+                async for chunk in agent.generate(user_input, context, is_resume):
+                    type = chunk['type']
+                    if type == "error" or type == "answer":
+                        await asyncio.sleep(1)
+                        print(f"FINAL_RESULT: {chunk}")
+                        auto_continue = False
+                    elif type == "ask":
+                        print(f"ASK_USER: {chunk}")
+                        user_input = "17.0.0.1"
+                        is_resume = True
+                        auto_continue = True
+                    else:
+                        print(f"RESULT_CHUNK: {chunk}")
+                        auto_continue = False
+            else:
+                print(f"*** START AGENT : RUN generate_with_result USER_INPUT={user_input}")
+                result = await agent.generate_with_result(user_input, context, is_resume)
+                await asyncio.sleep(1)
+                type = result['type']
+                if type == "error" or type == "answer":
+                    print(f"FINAL_RESULT: {result}")
+                    auto_continue = False
+                elif type == "ask":
+                    print(f"ASK_USER: {result}")
+                    user_input = "18.0.0.1"
+                    is_resume = True
+                    auto_continue = True
+
+
+if __name__ == "__main__":
+    setup_logging()
+    asyncio.run(main())
src/examples/engine/run_custom_and_agent_tools.py
@@ -13,6 +13,9 @@ if is_stream:
 else:
     setup_logging()

+# Before Run Exec: uv run example-fault-tools --alarmtype=2 , uv run example-a2a-tools
+# If want to use real A2A agent tool, use xga-agent-tool project
+
 async def main() -> None:
     tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
     system_prompt = read_file("templates/example/fault_user_prompt.txt")
@@ -27,16 +30,16 @@ async def main() -> None:
     global is_stream
     if is_stream:
         chunks = []
-        async for chunk in engine.run_task(
+        async for chunk in engine.run_task(task_input={"role": "user", "content": user_input}):
             chunks.append(chunk)
             print(chunk)

         final_result = engine.parse_final_result(chunks)
         print(f"\n\nFINAL_RESULT: {final_result}")
     else:
-        final_result = await engine.run_task_with_final_answer(
+        final_result = await engine.run_task_with_final_answer(task_input={"role": "user", "content": user_input})
         print(f"\n\nFINAL_RESULT: {final_result}")

-
+
 asyncio.run(main())
src/examples/engine/run_general_tools.py
@@ -10,7 +10,7 @@ async def main() -> None:
     engine = XGATaskEngine(general_tools=["*"])

     user_input = "This week's gold price"
-    final_result = await engine.run_task_with_final_answer(
+    final_result = await engine.run_task_with_final_answer(task_input={"role": "user", "content": user_input})
     print("FINAL RESULT:", final_result)

 asyncio.run(main())

src/examples/engine/run_human_in_loop.py
@@ -25,7 +25,7 @@ async def main() -> None:

     user_input = "locate fault and solution"
     final_result = await engine.run_task_with_final_answer(
-
+        task_input={"role": "user", "content": user_input},
         trace_id=trace_id
     )
     print("FINAL RESULT:", final_result)
@@ -34,7 +34,7 @@ async def main() -> None:
     print("====== Wait for user input ... ======")
     user_input = "ip=10.0.1.1"
     final_result = await engine.run_task_with_final_answer(
-
+        task_input={"role": "user", "content": user_input},
         trace_id=trace_id
     )
     print("FINAL RESULT:", final_result)

src/examples/engine/run_simple.py
@@ -10,7 +10,7 @@ async def main() -> None:
     engine = XGATaskEngine()

     final_result = await engine.run_task_with_final_answer(
-
+        task_input={"role": "user", "content": "1+7"}
     )

     print("FINAL RESULT:", final_result)
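The example updates above all reflect the same engine API adjustment: run_task() and run_task_with_final_answer() are now called with task_input as a chat-style message dict. A minimal sketch of the updated call shape (inside an async function, assuming an XGATaskEngine instance constructed as in these examples):

    # updated call convention shown in these diffs
    user_input = "locate fault and solution"
    final_result = await engine.run_task_with_final_answer(
        task_input={"role": "user", "content": user_input}
    )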