xgae 0.1.20__tar.gz → 0.3.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {xgae-0.1.20 → xgae-0.3.9}/.env +1 -0
- xgae-0.3.9/CHANGELOG.md +111 -0
- xgae-0.3.9/PKG-INFO +36 -0
- xgae-0.3.9/README.md +20 -0
- xgae-0.3.9/examples/agent/are/README.md +56 -0
- xgae-0.3.9/examples/agent/are/are/simulation/agents/xga/are_agent.py +289 -0
- xgae-0.3.9/examples/agent/are/are/simulation/agents/xga/are_agent_factory.py +26 -0
- xgae-0.3.9/examples/agent/are/are/simulation/agents/xga/are_prompt_builder.py +48 -0
- xgae-0.3.9/examples/agent/are/are/simulation/agents/xga/are_tool_box.py +110 -0
- xgae-0.3.9/examples/agent/are/are/simulation/agents/xga/mcp_tool_executor.py +134 -0
- xgae-0.3.9/examples/agent/are/are/simulation/scenarios/scenario_bomc_fault/scenario.py +184 -0
- xgae-0.3.9/examples/agent/are/are_modify_ref.zip +0 -0
- xgae-0.3.9/examples/agent/are/env.example +43 -0
- xgae-0.3.9/examples/agent/are/mcpservers/example_mcp_apps.json +11 -0
- xgae-0.3.9/examples/agent/are/templates/app_tool_prompt_template.md +2 -0
- xgae-0.3.9/examples/agent/are/templates/are_prompt_template.md +333 -0
- xgae-0.3.9/examples/agent/are/templates/system_tool_prompt_template.md +78 -0
- {xgae-0.1.20/src/examples/agent/langgraph/react → xgae-0.3.9/examples/agent/langgraph/reflection}/agent_base.py +4 -2
- xgae-0.3.9/examples/agent/langgraph/reflection/custom_prompt_rag.py +108 -0
- xgae-0.1.20/src/examples/agent/langgraph/react/react_agent.py → xgae-0.3.9/examples/agent/langgraph/reflection/reflection_agent.py +122 -41
- xgae-0.1.20/src/examples/agent/langgraph/react/final_result_agent.py → xgae-0.3.9/examples/agent/langgraph/reflection/result_eval_agent.py +15 -8
- xgae-0.1.20/src/examples/agent/langgraph/react/run_react_agent.py → xgae-0.3.9/examples/agent/langgraph/reflection/run_agent_app.py +2 -2
- {xgae-0.1.20/src → xgae-0.3.9}/examples/engine/run_custom_and_agent_tools.py +1 -1
- xgae-0.3.9/mcpservers/xga_server.json +11 -0
- {xgae-0.1.20 → xgae-0.3.9}/pyproject.toml +17 -8
- {xgae-0.1.20 → xgae-0.3.9}/templates/agent_tool_prompt_template.txt +1 -0
- {xgae-0.1.20 → xgae-0.3.9}/templates/custom_tool_prompt_template.txt +11 -8
- xgae-0.1.20/templates/example/final_result_template.txt → xgae-0.3.9/templates/example/result_eval_template.txt +10 -5
- {xgae-0.1.20 → xgae-0.3.9}/templates/general_tool_prompt_template.txt +1 -0
- xgae-0.3.9/test/test_chroma.py +31 -0
- xgae-0.3.9/uv.lock +3242 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/engine_base.py +5 -1
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/mcp_tool_box.py +22 -14
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/prompt_builder.py +21 -12
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/responser/non_stream_responser.py +2 -1
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/responser/stream_responser.py +3 -2
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/task_engine.py +44 -6
- xgae-0.3.9/xgae/gaia2/are_engine.py +126 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/utils/llm_client.py +5 -2
- xgae-0.1.20/CHANGELOG.md +0 -50
- xgae-0.1.20/PKG-INFO +0 -12
- xgae-0.1.20/README.md +0 -0
- xgae-0.1.20/uv.lock +0 -1463
- {xgae-0.1.20 → xgae-0.3.9}/.python-version +0 -0
- {xgae-0.1.20 → xgae-0.3.9/examples/agent/are}/mcpservers/xga_server.json +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/examples/engine/run_general_tools.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/examples/engine/run_human_in_loop.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/examples/engine/run_simple.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/examples/tools/custom_fault_tools_app.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/examples/tools/simu_a2a_tools_app.py +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/mcpservers/custom_servers.json +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/mcpservers/xga_server_sse.json +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/templates/example/fault_user_prompt.txt +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/templates/gemini_system_prompt_template.txt +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/templates/system_prompt_response_sample.txt +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/templates/system_prompt_template.txt +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/test/test_langfuse.py +0 -0
- {xgae-0.1.20 → xgae-0.3.9}/test/test_litellm_langfuse.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/__init__.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/responser/responser_base.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine/task_langfuse.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/engine_cli_app.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/tools/without_general_tools_app.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/utils/__init__.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/utils/json_helpers.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/utils/misc.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/utils/setup_env.py +0 -0
- {xgae-0.1.20/src → xgae-0.3.9}/xgae/utils/xml_tool_parser.py +0 -0
{xgae-0.1.20 → xgae-0.3.9}/.env
RENAMED
xgae-0.3.9/CHANGELOG.md
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
## [0.3.9] - 2025-11-4
|
|
2
|
+
### Added
|
|
3
|
+
- ✅ GAIA2 ARE Example: Add new prompt template, Leading over ARE 'default agent'
|
|
4
|
+
### Modified
|
|
5
|
+
- GAIA2 ARE Example: Refactor prompt templates, use MD format
|
|
6
|
+
- GAIA2 ARE Example: XGAArePromptBuilder add 'prior' template mode
|
|
7
|
+
- ARE Engine: remove useless 'system_prompt' init parameter
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
## [0.3.5] - 2025-11-1
|
|
11
|
+
### Added
|
|
12
|
+
- GAIA2 ARE Example: XGAArePromptBuilder, Use MCP tool format general tool construct prompt
|
|
13
|
+
### Modified
|
|
14
|
+
- GAIA2 ARE Example: Refactor code structure and class names
|
|
15
|
+
- GAIA2 ARE Example: Optimize prompt template
|
|
16
|
+
- ARETaskEngine: add prompt_builder init parameter
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
## [0.3.3] - 2025-10-30
|
|
20
|
+
### Added
|
|
21
|
+
- GAIA2 ARE Scenario: scenario_bomc_fault
|
|
22
|
+
- GAIA2 ARE MCP: Support Custom MCP Apps, example_mcp_apps.json
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
## [0.3.2] - 2025-10-24
|
|
26
|
+
### Added
|
|
27
|
+
- GAIA2 ARE Example: XGAAreAgent, XGAAreToolBox
|
|
28
|
+
### Modified
|
|
29
|
+
- ARETaskEngine
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
## [0.3.0] - 2025-10-22
|
|
33
|
+
### Added
|
|
34
|
+
- Support GAIA2 ARE: ARETaskEngine
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
## [0.2.4] - 2025-9-23
|
|
38
|
+
### Modified
|
|
39
|
+
- Refactor project structure
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
## [0.2.3] - 2025-9-19
|
|
43
|
+
### Modified
|
|
44
|
+
- CustomPromptRag: remove FastEmbedEmbeddings, use 'text-embedding-v3' model for chinese, avoid download 'bge-small-zh-v1.5'
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
## [0.2.1] - 2025-9-17
|
|
48
|
+
### Added
|
|
49
|
+
- Example ReflectionAgent: add CustomPromptRag, use FastEmbedEmbeddings and 'BAAI/bge-small-zh-v1.5' model
|
|
50
|
+
### Modified
|
|
51
|
+
- pyproject.toml: add [project.optional-dependencies] 'examples'
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
## [0.2.0] - 2025-9-10
|
|
55
|
+
### Added
|
|
56
|
+
- Agent Engine release 0.2
|
|
57
|
+
- Example: Langgraph ReflectionAgent release 0.2
|
|
58
|
+
### Fixed
|
|
59
|
+
- Agent Engine: call mcp tool fail, call 'ask' tool again and again
|
|
60
|
+
- Example Langgraph ReflectionAgent: retry on 'ask', user_input is ask answer
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
## [0.1.20] - 2025-9-9
|
|
64
|
+
### Added
|
|
65
|
+
- Example: Langgraph ReflectionAgent add final_result_agent
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
## [0.1.19] - 2025-9-8
|
|
69
|
+
### Added
|
|
70
|
+
- Example: Langgraph ReflectionAgent release V1, full logic but no final result agent and tool select agent
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
# Release Changelog
|
|
74
|
+
## [0.1.18] - 2025-9-3
|
|
75
|
+
### Added
|
|
76
|
+
- Support Agent tools
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
## [0.1.17] - 2025-9-1
|
|
80
|
+
### Target
|
|
81
|
+
- Saved for XGATaskEngine base version
|
|
82
|
+
### Changed
|
|
83
|
+
- Delete StreamTaskResponser tool_exec_on_stream model code
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
## [0.1.15] - 2025-9-1
|
|
87
|
+
### Target
|
|
88
|
+
- Saved for StreamResponser tool_exec_on_stream mode, next release will be abolished
|
|
89
|
+
### Changed
|
|
90
|
+
- Refactor TaskResponseProcessor, XGATaskEngine
|
|
91
|
+
### Fixed
|
|
92
|
+
- Fix finish_reason judge logic
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
## [0.1.14] - 2025-8-31
|
|
96
|
+
### Target
|
|
97
|
+
- First complete version is merged
|
|
98
|
+
### Changed
|
|
99
|
+
- StreamTaskResponser first version
|
|
100
|
+
|
|
101
|
+
## [0.1.10] - 2025-8-28
|
|
102
|
+
### Target
|
|
103
|
+
- NonStream mode release is completed
|
|
104
|
+
### Changed
|
|
105
|
+
- StreamTaskResponser is original
|
|
106
|
+
- NonStreamTaskResponser first version is completed
|
|
107
|
+
- Langfuse use 2.x, match for LiteLLM package
|
|
108
|
+
|
|
109
|
+
## [0.1.7] - 2025-8-25
|
|
110
|
+
### Target
|
|
111
|
+
- Langfuse use 3.x package
|
xgae-0.3.9/PKG-INFO
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: xgae
|
|
3
|
+
Version: 0.3.9
|
|
4
|
+
Summary: Extreme General Agent Engine
|
|
5
|
+
Requires-Python: >=3.11
|
|
6
|
+
Requires-Dist: colorlog>=6.9.0
|
|
7
|
+
Requires-Dist: langchain-mcp-adapters>=0.1.9
|
|
8
|
+
Requires-Dist: langfuse==2.60.9
|
|
9
|
+
Requires-Dist: litellm>=1.71.1
|
|
10
|
+
Requires-Dist: mcp>=1.11.0
|
|
11
|
+
Provides-Extra: examples
|
|
12
|
+
Requires-Dist: chromadb==1.1.0; extra == 'examples'
|
|
13
|
+
Requires-Dist: langchain-community==0.3.29; extra == 'examples'
|
|
14
|
+
Requires-Dist: langgraph==0.6.5; extra == 'examples'
|
|
15
|
+
Description-Content-Type: text/markdown
|
|
16
|
+
|
|
17
|
+
## XGAE: Extreme General Agent Engine
|
|
18
|
+
### Functional Features
|
|
19
|
+
- Support Custom prompt and external MCP Tools
|
|
20
|
+
- Support Langfuse
|
|
21
|
+
- Support Langgraph
|
|
22
|
+
- Support GAIA2 ARE agent and Custom MCP Apps, Leading over ARE 'default agent'
|
|
23
|
+
- Support Human-in-Loop in agent
|
|
24
|
+
- Can Use A2A protocol call A2A Agent as tool by 'xgaproxy' project
|
|
25
|
+
- Can Use E2B or Daytona Sandbox of 'xgatools' project
|
|
26
|
+
|
|
27
|
+
### Non-Functional Features
|
|
28
|
+
- Faster than SUNA Engine's speed
|
|
29
|
+
- Architecture is lighter than SUNA Engine
|
|
30
|
+
- Separate tools from Agent Engine
|
|
31
|
+
|
|
32
|
+
### Examples
|
|
33
|
+
- langgraph: Build React mode Langgraph Agent by XGA Engine
|
|
34
|
+
- are: Build GAIA2 ARE Agent by XGA Engine
|
|
35
|
+
- engine: Use XGA Engine in various scenarios
|
|
36
|
+
- tools: Simulation tools for example and test
|
xgae-0.3.9/README.md
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
## XGAE: Extreme General Agent Engine
|
|
2
|
+
### Functional Features
|
|
3
|
+
- Support Custom prompt and external MCP Tools
|
|
4
|
+
- Support Langfuse
|
|
5
|
+
- Support Langgraph
|
|
6
|
+
- Support GAIA2 ARE agent and Custom MCP Apps, Leading over ARE 'default agent'
|
|
7
|
+
- Support Human-in-Loop in agent
|
|
8
|
+
- Can Use A2A protocol call A2A Agent as tool by 'xgaproxy' project
|
|
9
|
+
- Can Use E2B or Daytona Sandbox of 'xgatools' project
|
|
10
|
+
|
|
11
|
+
### Non-Functional Features
|
|
12
|
+
- Faster than SUNA Engine's speed
|
|
13
|
+
- Architecture is lighter than SUNA Engine
|
|
14
|
+
- Separate tools from Agent Engine
|
|
15
|
+
|
|
16
|
+
### Examples
|
|
17
|
+
- langgraph: Build React mode Langgraph Agent by XGA Engine
|
|
18
|
+
- are: Build GAIA2 ARE Agent by XGA Engine
|
|
19
|
+
- engine: Use XGA Engine in various scenarios
|
|
20
|
+
- tools: Simulation tools for example and test
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
## GAIA2 ARE Support
|
|
2
|
+
### How to add XGAE to ARE Project
|
|
3
|
+
- add xgae==0.3.x to ARE requirements.txt
|
|
4
|
+
- uv pip install -r requirements.txt
|
|
5
|
+
- Modify ARE 'AgentBuilder' and 'AgentConfigBuilder' class, add "xga" type agent :
|
|
6
|
+
```
|
|
7
|
+
File: agent_builder.py
|
|
8
|
+
class AgentBuilder:
|
|
9
|
+
def list_agents(self) -> list[str]:
|
|
10
|
+
return ["default", "xga"]
|
|
11
|
+
|
|
12
|
+
def build():
|
|
13
|
+
...
|
|
14
|
+
agent_name = agent_config.get_agent_name()
|
|
15
|
+
if agent_name in ["default", "xga"]:
|
|
16
|
+
# add xga agent code
|
|
17
|
+
|
|
18
|
+
File: agent_config_builder.py
|
|
19
|
+
class AgentConfigBuilder:
|
|
20
|
+
def build():
|
|
21
|
+
if agent_name in["default", "xga"]:
|
|
22
|
+
```
|
|
23
|
+
- Modify ARE 'MCPApp' :
|
|
24
|
+
```
|
|
25
|
+
File: mcp_app.py
|
|
26
|
+
class MCPApp:
|
|
27
|
+
def _call_tool(self, tool_name: str, **kwargs) -> str:
|
|
28
|
+
try:
|
|
29
|
+
...
|
|
30
|
+
# original code don't support async engine loop
|
|
31
|
+
from are.simulation.agents.xga.mcp_tool_executor import call_mcp_tool
|
|
32
|
+
result = call_mcp_tool(self.server_url, tool_name, kwargs, 10)
|
|
33
|
+
return str(result)
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
- Modify ARE .env, add XGAE .env config, refer to env.example
|
|
38
|
+
- Copy XGAE package 'mcpservers' and 'templates' directory to ARE project root
|
|
39
|
+
- You can use prompt template by two modes, 'prior' mode is leading over ARE 'default agent'
|
|
40
|
+
|
|
41
|
+
### Run XGA Agent in ARE
|
|
42
|
+
```
|
|
43
|
+
# Run ARE Code Scenario
|
|
44
|
+
uv run are-run -e -s scenario_apps_tutorial -a xga --model openai/qwen3-235b-a22b --provider llama-api --output_dir ./output --log-level INFO
|
|
45
|
+
|
|
46
|
+
# Run Custom Scenario
|
|
47
|
+
uv run example-fault-tools
|
|
48
|
+
uv run are-run -e -s scenario_bomc_fault -a xga --model openai/qwen3-235b-a22b --provider llama-api --output_dir ./output --log-level INFO
|
|
49
|
+
|
|
50
|
+
# Run HF Scenario Benchmark
|
|
51
|
+
uv run are-benchmark run -a xga --dataset ./scenarios --config mini --model openai/deepseek-v3.1 --provider llama-api --output_dir ./output --limit 1 --num_runs 1 --max_concurrent_scenarios 1 --log-level INFO
|
|
52
|
+
|
|
53
|
+
# Run GUI
|
|
54
|
+
uv run are-gui -a xga -s scenario_find_image_file --model openai/qwen3-235b-a22b --provider llama-api --log-level INFO
|
|
55
|
+
|
|
56
|
+
```
|
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import asyncio
|
|
3
|
+
from typing import List, Dict, Any
|
|
4
|
+
from typing_extensions import override
|
|
5
|
+
|
|
6
|
+
from are.simulation.agents.are_simulation_agent import AgentStoppedException
|
|
7
|
+
from are.simulation.agents.llm.types import MMObservation
|
|
8
|
+
from are.simulation.agents.multimodal import Attachment, attachments_to_pil
|
|
9
|
+
from are.simulation.agents.agent_log import (
|
|
10
|
+
LLMInputLog,
|
|
11
|
+
LLMOutputThoughtActionLog,
|
|
12
|
+
StepLog,
|
|
13
|
+
StopLog,
|
|
14
|
+
SystemPromptLog,
|
|
15
|
+
ThoughtLog,
|
|
16
|
+
ToolCallLog,
|
|
17
|
+
FinalAnswerLog
|
|
18
|
+
)
|
|
19
|
+
from are.simulation.agents.default_agent.base_agent import (
|
|
20
|
+
BaseAgent,
|
|
21
|
+
RunningState,
|
|
22
|
+
get_offset_from_time_config_mode
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
from xgae.utils.llm_client import LLMConfig
|
|
26
|
+
from xgae.gaia2.are_engine import ARETaskEngine
|
|
27
|
+
from are.simulation.agents.xga.are_tool_box import XGAAreToolBox
|
|
28
|
+
from are.simulation.agents.xga.are_prompt_builder import XGAArePromptBuilder
|
|
29
|
+
|
|
30
|
+
def pre_run_task_check(agent, iterations: int, llm_messages: List[Dict[str, Any]]):
    """Hook executed by the ARETaskEngine before each task iteration.

    Mirrors the bookkeeping of ARE's BaseAgent step loop: records the system
    prompt (first iteration only), a StepLog and the LLM input, honors the
    agent's stop event, runs conditional pre-steps and pauses the simulated
    environment when generation-time simulation is configured.

    Args:
        agent: the XGAAreAgent driving this run (also owns ``task_engine``).
        iterations: zero-based iteration counter supplied by the engine.
        llm_messages: the messages about to be sent to the LLM, logged verbatim.

    Any exception (including AgentStoppedException raised on the stop event)
    is logged and converted into an engine stop via ``task_engine.stop_task()``.
    """
    try:
        agent.logger.info(f"\n\n------ Starting Run Task Iteration {iterations}... ------")

        # First iteration: the engine has built the final task prompt by now;
        # adopt it as the agent's system prompt and log it once.
        if iterations == 0:
            agent.system_prompt = agent.task_engine.task_prompt
            agent.logger.info(f"------ SYSTEM_PROMPT ------ \n{agent.system_prompt}\n")
            agent.append_agent_log(
                SystemPromptLog(
                    content = agent.system_prompt,
                    timestamp = agent.make_timestamp(),
                    agent_id = agent.agent_id,
                )
            )

        agent.append_agent_log(
            StepLog(
                iteration = agent.iterations,
                timestamp = agent.make_timestamp(),
                agent_id = agent.agent_id,
            )
        )

        if agent.stop_event.is_set():
            agent.logger.info(f"pre_run_task_check[{iterations}]: Recv Stop Event before condition, raise AgentStoppedException")
            raise AgentStoppedException("Agent stopped.")

        # Execute a pre_step() function if it exists
        for conditional_step in agent.conditional_pre_steps:
            if conditional_step.condition is None or conditional_step.condition(agent):
                conditional_step.function(agent)

        # Re-check after pre-steps: a pre-step may have triggered the stop event.
        if agent.stop_event.is_set():
            agent.logger.info(f"pre_run_task_check[{iterations}]: Recv Stop Event after condition, raise AgentStoppedException")
            raise AgentStoppedException("Agent stopped.")

        # Begin step()
        agent.append_agent_log(
            LLMInputLog(
                content = llm_messages,
                timestamp = agent.make_timestamp(),
                agent_id = agent.agent_id
            )
        )

        # Pause the simulated environment while the LLM "thinks"; resumed in
        # post_run_task_check once the response arrives.
        if agent.simulated_generation_time_config is not None:
            if agent.pause_env is None:
                raise ValueError("pause_env is not set")
            agent.pause_env()
    except Exception as e:
        agent.log_error(e)
        agent.logger.info(f"pre_run_task_check[{iterations}]: Exception Occur, Stop task")
        agent.task_engine.stop_task()
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def post_run_task_check(agent, iterations: int, llm_response: Dict[str, Any]):
    """Hook executed by the ARETaskEngine after each LLM response.

    Resumes the simulated environment (when configured), records the LLM
    output as a thought/action log, honors the stop event, runs conditional
    post-steps (e.g. polling environment notifications) and finally advances
    the agent's iteration counters.

    Args:
        agent: the XGAAreAgent driving this run.
        iterations: zero-based iteration counter supplied by the engine.
        llm_response: raw LLM response; logged via ``str()``.

    Exceptions are logged and converted into an engine stop; the ``finally``
    block always resumes the environment and bumps the counters.
    """
    try:
        agent.logger.info(f"------ LLM Response Iteration [{iterations}] ------ \n{llm_response}\n")

        llm_output = str(llm_response)

        # Resume the environment after the generation of a thought/action if needed
        if agent.simulated_generation_time_config is not None:
            if agent.resume_env is None:
                raise ValueError("resume_env is not set")

            offset = get_offset_from_time_config_mode(
                time_config = agent.simulated_generation_time_config,
                completion_duration = 0,
            )
            agent.logger.info(f"post_run_task_check[{iterations}]: Resuming environment with {offset} offset")
            agent.resume_env(offset)

        # NOTE(review): metadata is always empty here, so every token count
        # below is logged as 0 — TODO wire in real LLM usage metadata.
        metadata = {}
        agent.append_agent_log(
            LLMOutputThoughtActionLog(
                content = llm_output,
                timestamp = agent.make_timestamp(),
                agent_id = agent.agent_id,
                prompt_tokens = metadata.get("prompt_tokens", 0),
                completion_tokens = metadata.get("completion_tokens", 0),
                total_tokens = metadata.get("total_tokens", 0),
                reasoning_tokens = metadata.get("reasoning_tokens", 0),
                completion_duration = metadata.get("completion_duration", 0),
            )
        )
        # end step()

        if agent.stop_event.is_set():
            agent.logger.info(f"post_run_task_check[{iterations}]: Recv Stop Event, raise AgentStoppedException")
            raise AgentStoppedException("Agent stopped.")

        # Execute a post_step() function if it exists (polling the Meta Agents Research Environments notifications for example)
        for conditional_step in agent.conditional_post_steps:
            if conditional_step.condition is None or conditional_step.condition(agent):
                conditional_step.function(agent)
    except Exception as e:
        agent.log_error(e)
        agent.logger.info(f"post_run_task_check[{iterations}]: Exception Occur, Stop task")
        agent.task_engine.stop_task()
    finally:
        if agent.simulated_generation_time_config and agent.resume_env:
            agent.resume_env(0.0) # Resume without advancing time

        agent.iterations += 1
        agent.planning_counter += 1
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def terminate_task_check(agent, iterations: int) -> bool:
    """Decide whether the engine should terminate the current task.

    Evaluates the agent's ``termination_step.condition`` (when one is set).
    Any exception raised while evaluating it is logged and treated as
    "do not terminate", so a faulty condition never kills the run.

    Args:
        agent: the XGAAreAgent driving this run.
        iterations: zero-based iteration counter, used only for logging.

    Returns:
        True when the termination condition fires, False otherwise.
    """
    should_stop = False

    try:
        condition = agent.termination_step.condition
        if condition:
            should_stop = condition(agent)
            if should_stop:
                agent.logger.info(f"terminate_task_check[{iterations}]: termination_step.condition is True")
    except Exception as exc:
        agent.log_error(exc)

    return should_stop
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class XGAAreAgent(BaseAgent):
    """ARE BaseAgent implementation that delegates the reasoning loop to an XGAE ARETaskEngine.

    The engine streams chunks (``status`` / ``assistant`` / ``tool``) which are
    translated into ARE agent logs; the engine's final result is delivered to
    the user through the ``AgentUserInterface__send_message_to_user`` tool and
    recorded as a FinalAnswerLog.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Engine is created lazily in initialize(); None until then.
        self.task_engine: ARETaskEngine | None = None

    @override
    def initialize(self, attachments: list[Attachment] | None = None, **kwargs) -> None:
        """Reset run state and build the ARETaskEngine from ARE's llm_engine config.

        Args:
            attachments: optional image attachments injected into the action
                executor state as ``image_<i>`` entries.
            **kwargs: may contain ``start_logs`` to replay a prior agent state.
        """
        self.logs = []
        self.iterations = 0
        self.planning_counter = 0

        tool_box = XGAAreToolBox(self.tools)

        # Concatenate every ARE-provided system prompt into one general prompt
        # for the builder (which runs in 'prior' template mode).
        general_system_prompt = "\n\n".join(prompt for prompt in self.init_system_prompts.values())
        prompt_builder = XGAArePromptBuilder(general_system_prompt, "prior")

        model_config = self.llm_engine.model_config
        llm_config = LLMConfig(
            model = model_config.model_name,
            api_key = model_config.api_key,
            api_base = model_config.endpoint
        )

        self.task_engine = ARETaskEngine(
            agent = self,
            agent_id = self.agent_id,
            max_auto_run = self.max_iterations,
            llm_config = llm_config,
            tool_box = tool_box,
            prompt_builder = prompt_builder,
            pre_run_task_fn = pre_run_task_check,
            post_run_task_fn = post_run_task_check,
            terminate_task_fn = terminate_task_check,
        )

        # Reload the agent state if logs are provided
        start_logs = kwargs.pop("start_logs", [])
        if start_logs:
            self.replay(start_logs)

        # Include additional image PILs directly into state stack.
        if attachments:
            images = attachments_to_pil(attachments)
            self.action_executor.inject_state({f"image_{i}": image for i, image in enumerate(images)})
            self.logger.debug(f"XGAAreAgent initialize: Injecting images into states for {len(images)} images")
            self.logger.debug(f"XGAAreAgent initialize: New Keys {','.join(self.action_executor.state.keys())}")

        self.initialized = True

    @override
    def execute_agent_loop(self) -> str | None | MMObservation:
        """Run the async engine loop to completion, then apply the termination step."""
        with asyncio.Runner() as runner:
            runner.run(self.async_execute_agent_loop())

        # We have reached a termination condition, execute the termination method
        if self.termination_step.function is not None and not self.stop_event.is_set():
            return self.termination_step.function(self)

    async def async_execute_agent_loop(self) -> str | None | MMObservation:
        """Consume engine chunks, mirror them into ARE logs and deliver the final result."""
        chunks = []
        async for chunk in self.task_engine.run_task(task_input={"role": "user", "content": self.task}):
            chunks.append(chunk)
            chunk_type = chunk['type']
            if chunk_type == "status":
                status_content = chunk['content']
                status_type = status_content['status_type']
                if status_type == "error":
                    error_msg = chunk.get('message')
                    self.logger.warning(f"XGAAreAgent execute_agent_loop: Fatal error - {error_msg}")
                    self.log_error(error_msg)
                elif status_type == "stop":
                    error_msg = chunk.get('message')
                    self.logger.warning("XGAAreAgent execute_agent_loop: Agent stopped.")
                    self.append_agent_log(
                        StopLog(
                            content = f"Agent stopped - {error_msg}",
                            timestamp = self.make_timestamp(),
                            agent_id = self.agent_id,
                        )
                    )
            elif chunk_type == "assistant":
                llm_content = chunk['content']['content']
                if "<thought>" in llm_content:
                    # BUGFIX: an unclosed/malformed <thought> tag made re.search
                    # return None and crashed with AttributeError on .group();
                    # treat a non-matching tag pair as "no thought".
                    thought_match = re.search(r'<thought>(.*?)</thought>', llm_content, re.DOTALL)
                    thought_content = thought_match.group(1).strip() if thought_match else ""
                    if thought_content:
                        self.append_agent_log(
                            ThoughtLog(
                                content = thought_content,
                                timestamp = self.make_timestamp(),
                                agent_id = self.agent_id,
                            )
                        )
            elif chunk_type == "tool":
                tool_content = chunk['content']
                tool_execution = tool_content.get('tool_execution')
                self.append_agent_log(
                    ToolCallLog(
                        tool_name = tool_execution.get('function_name'),
                        tool_arguments = tool_execution.get('arguments'),
                        timestamp = self.make_timestamp(),
                        agent_id = self.agent_id,
                    )
                )

        final_result = self.task_engine.parse_final_result(chunks)
        print(f"\n\nFINAL_RESULT: {final_result}")

        # Send Final Result to user
        args = {
            'content': final_result['content']
        }
        self.tools['AgentUserInterface__send_message_to_user'](**args)
        self.append_agent_log(
            ToolCallLog(
                tool_name = 'AgentUserInterface__send_message_to_user',
                tool_arguments = args,
                timestamp = self.make_timestamp(),
                agent_id = self.agent_id,
            )
        )

        # Return Final Result
        if final_result['type'] == "error":
            self.custom_state["running_state"] = RunningState.FAILED
        else:
            self.custom_state["running_state"] = RunningState.TERMINATED

        self.append_agent_log(
            FinalAnswerLog(
                content = final_result['content'],
                timestamp = self.make_timestamp(),
                agent_id = self.agent_id
            )
        )
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
from are.simulation.agents.are_simulation_agent_config import ARESimulationReactBaseAgentConfig
|
|
2
|
+
from are.simulation.agents.default_agent.steps.are_simulation import get_are_simulation_update_pre_step
|
|
3
|
+
from are.simulation.agents.default_agent.termination_methods.are_simulation import get_gaia2_termination_step
|
|
4
|
+
from are.simulation.agents.default_agent.tools.json_action_executor import JsonActionExecutor
|
|
5
|
+
from are.simulation.agents.llm.llm_engine import LLMEngine
|
|
6
|
+
|
|
7
|
+
from are.simulation.agents.xga.are_agent import XGAAreAgent
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def xga_simulation_react_xml_agent(
    llm_engine: LLMEngine, base_agent_config: ARESimulationReactBaseAgentConfig
):
    """Factory for the 'xga' ARE agent.

    Assembles an XGAAreAgent from the given LLM engine and the ARE react base
    agent config. Tools are left empty here; ARE injects them later.
    """
    system_prompts = {
        "system_prompt": str(base_agent_config.system_prompt),
    }
    # Just for compatible BaseAgent, useless
    action_executor = JsonActionExecutor(
        use_custom_logger=base_agent_config.use_custom_logger
    )

    return XGAAreAgent(
        llm_engine=llm_engine,
        tools={},
        system_prompts=system_prompts,
        termination_step=get_gaia2_termination_step(),
        max_iterations=base_agent_config.max_iterations,
        action_executor=action_executor,
        conditional_pre_steps=[get_are_simulation_update_pre_step()],
        use_custom_logger=base_agent_config.use_custom_logger,
    )
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from typing_extensions import override
|
|
3
|
+
from typing import Optional, List, Literal
|
|
4
|
+
|
|
5
|
+
from xgae.engine.engine_base import XGAToolSchema
|
|
6
|
+
from xgae.engine.prompt_builder import XGAPromptBuilder
|
|
7
|
+
|
|
8
|
+
class XGAArePromptBuilder(XGAPromptBuilder):
    """Prompt builder for the GAIA2 ARE agent.

    Two template modes:
      - "basic": reuse ARE's general system prompt plus the stock system/app
        tool templates.
      - "prior" (default): ignore ARE's system prompt and rely entirely on
        the combined ``are_prompt_template.md``.
    """

    def __init__(self,
                 general_system_prompt: Optional[str],
                 prompt_template_mode: Literal["basic", "prior"] = "prior"
        ):
        system_prompt = ""
        if prompt_template_mode == "basic":
            # BUGFIX: general_system_prompt is Optional, but re.search in
            # build_general_system_prompt raises TypeError on None — coerce
            # a missing prompt to the empty string first.
            system_prompt = self.build_general_system_prompt(general_system_prompt or "")

        super().__init__(system_prompt)

        self.prompt_template_mode = prompt_template_mode


    def build_general_system_prompt(self, _system_prompt: str) -> str:
        """Extract ARE's <general_instructions> section (or fall back to the
        whole prompt) and prefix it with a CORE IDENTITY header."""
        pattern = r'<general_instructions>(.*?)</general_instructions>'
        prompt_are_general = re.search(pattern, _system_prompt, re.DOTALL)
        prompt_header = "# CORE IDENTITY\n"
        if prompt_are_general:
            prompt_are_general = prompt_header + prompt_are_general.group(1).strip() + "\n\n"
        else:
            # No tagged section: use the raw prompt as the identity text.
            prompt_are_general = prompt_header + _system_prompt + "\n\n"
        return prompt_are_general


    @override
    def build_general_tool_prompt(self, tool_schemas: List[XGAToolSchema]) -> str:
        """System tools are only described in 'basic' mode; the 'prior'
        template already covers them, so return an empty prompt there."""
        tool_prompt = ""
        if self.prompt_template_mode == "basic":
            tool_prompt = self.build_mcp_tool_prompt("templates/system_tool_prompt_template.md", tool_schemas)
        return tool_prompt


    @override
    def build_custom_tool_prompt(self, tool_schemas: List[XGAToolSchema]) -> str:
        """Render the app/custom tool prompt from the mode-specific template."""
        if self.prompt_template_mode == "basic":
            tool_prompt = self.build_mcp_tool_prompt("templates/app_tool_prompt_template.md", tool_schemas)
        else:
            tool_prompt = self.build_mcp_tool_prompt("templates/are_prompt_template.md", tool_schemas)
        return tool_prompt
|
|
48
|
+
|