xgae 0.3.0__tar.gz → 0.3.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic. Click here for more details.

Files changed (57) hide show
  1. {xgae-0.3.0 → xgae-0.3.2}/CHANGELOG.md +8 -2
  2. xgae-0.3.2/PKG-INFO +36 -0
  3. xgae-0.3.2/README.md +20 -0
  4. xgae-0.3.2/examples/agent/are/simulation/agents/xga/README.md +30 -0
  5. xgae-0.3.2/examples/agent/are/simulation/agents/xga/env.example +42 -0
  6. xgae-0.3.2/examples/agent/are/simulation/agents/xga/xga_agent.py +289 -0
  7. xgae-0.3.2/examples/agent/are/simulation/agents/xga/xga_agent_factory.py +26 -0
  8. xgae-0.3.2/examples/agent/are/simulation/agents/xga/xga_tool_box.py +109 -0
  9. {xgae-0.3.0 → xgae-0.3.2}/examples/agent/langgraph/reflection/custom_prompt_rag.py +2 -1
  10. {xgae-0.3.0 → xgae-0.3.2}/pyproject.toml +5 -5
  11. {xgae-0.3.0 → xgae-0.3.2}/uv.lock +25 -11
  12. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/engine_base.py +5 -1
  13. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/mcp_tool_box.py +22 -14
  14. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/prompt_builder.py +3 -2
  15. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/responser/non_stream_responser.py +2 -1
  16. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/responser/stream_responser.py +3 -2
  17. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/task_engine.py +7 -5
  18. {xgae-0.3.0 → xgae-0.3.2}/xgae/gaia2/are_engine.py +4 -3
  19. {xgae-0.3.0 → xgae-0.3.2}/xgae/utils/llm_client.py +3 -0
  20. xgae-0.3.0/PKG-INFO +0 -14
  21. xgae-0.3.0/README.md +0 -0
  22. {xgae-0.3.0 → xgae-0.3.2}/.env +0 -0
  23. {xgae-0.3.0 → xgae-0.3.2}/.python-version +0 -0
  24. {xgae-0.3.0 → xgae-0.3.2}/examples/agent/langgraph/reflection/agent_base.py +0 -0
  25. {xgae-0.3.0 → xgae-0.3.2}/examples/agent/langgraph/reflection/reflection_agent.py +0 -0
  26. {xgae-0.3.0 → xgae-0.3.2}/examples/agent/langgraph/reflection/result_eval_agent.py +0 -0
  27. {xgae-0.3.0 → xgae-0.3.2}/examples/agent/langgraph/reflection/run_agent_app.py +0 -0
  28. {xgae-0.3.0 → xgae-0.3.2}/examples/engine/run_custom_and_agent_tools.py +0 -0
  29. {xgae-0.3.0 → xgae-0.3.2}/examples/engine/run_general_tools.py +0 -0
  30. {xgae-0.3.0 → xgae-0.3.2}/examples/engine/run_human_in_loop.py +0 -0
  31. {xgae-0.3.0 → xgae-0.3.2}/examples/engine/run_simple.py +0 -0
  32. {xgae-0.3.0 → xgae-0.3.2}/examples/tools/custom_fault_tools_app.py +0 -0
  33. {xgae-0.3.0 → xgae-0.3.2}/examples/tools/simu_a2a_tools_app.py +0 -0
  34. {xgae-0.3.0 → xgae-0.3.2}/mcpservers/custom_servers.json +0 -0
  35. {xgae-0.3.0 → xgae-0.3.2}/mcpservers/xga_server.json +0 -0
  36. {xgae-0.3.0 → xgae-0.3.2}/mcpservers/xga_server_sse.json +0 -0
  37. {xgae-0.3.0 → xgae-0.3.2}/templates/agent_tool_prompt_template.txt +0 -0
  38. {xgae-0.3.0 → xgae-0.3.2}/templates/custom_tool_prompt_template.txt +0 -0
  39. {xgae-0.3.0 → xgae-0.3.2}/templates/example/fault_user_prompt.txt +0 -0
  40. {xgae-0.3.0 → xgae-0.3.2}/templates/example/result_eval_template.txt +0 -0
  41. {xgae-0.3.0 → xgae-0.3.2}/templates/gemini_system_prompt_template.txt +0 -0
  42. {xgae-0.3.0 → xgae-0.3.2}/templates/general_tool_prompt_template.txt +0 -0
  43. {xgae-0.3.0 → xgae-0.3.2}/templates/system_prompt_response_sample.txt +0 -0
  44. {xgae-0.3.0 → xgae-0.3.2}/templates/system_prompt_template.txt +0 -0
  45. {xgae-0.3.0 → xgae-0.3.2}/test/test_chroma.py +0 -0
  46. {xgae-0.3.0 → xgae-0.3.2}/test/test_langfuse.py +0 -0
  47. {xgae-0.3.0 → xgae-0.3.2}/test/test_litellm_langfuse.py +0 -0
  48. {xgae-0.3.0 → xgae-0.3.2}/xgae/__init__.py +0 -0
  49. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/responser/responser_base.py +0 -0
  50. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine/task_langfuse.py +0 -0
  51. {xgae-0.3.0 → xgae-0.3.2}/xgae/engine_cli_app.py +0 -0
  52. {xgae-0.3.0 → xgae-0.3.2}/xgae/tools/without_general_tools_app.py +0 -0
  53. {xgae-0.3.0 → xgae-0.3.2}/xgae/utils/__init__.py +0 -0
  54. {xgae-0.3.0 → xgae-0.3.2}/xgae/utils/json_helpers.py +0 -0
  55. {xgae-0.3.0 → xgae-0.3.2}/xgae/utils/misc.py +0 -0
  56. {xgae-0.3.0 → xgae-0.3.2}/xgae/utils/setup_env.py +0 -0
  57. {xgae-0.3.0 → xgae-0.3.2}/xgae/utils/xml_tool_parser.py +0 -0
@@ -1,7 +1,13 @@
1
- ## [0.3.0] - 2025-10-24
1
+ ## [0.3.2] - 2025-10-24
2
2
  ### Added
3
- - Support GAIA2 ARE Engine
3
+ - GAIA2 ARE Example: XGAAreAgent, XGAAreToolBox
4
+ ### Modified
5
+ - ARETaskEngine
6
+
4
7
 
8
+ ## [0.3.0] - 2025-10-22
9
+ ### Added
10
+ - Support GAIA2 ARE: ARETaskEngine
5
11
 
6
12
 
7
13
  ## [0.2.4] - 2025-9-23
xgae-0.3.2/PKG-INFO ADDED
@@ -0,0 +1,36 @@
1
+ Metadata-Version: 2.4
2
+ Name: xgae
3
+ Version: 0.3.2
4
+ Summary: Extreme General Agent Engine
5
+ Requires-Python: >=3.11
6
+ Requires-Dist: colorlog>=6.9.0
7
+ Requires-Dist: langchain-mcp-adapters>=0.1.9
8
+ Requires-Dist: langfuse==2.60.9
9
+ Requires-Dist: litellm>=1.71.1
10
+ Requires-Dist: mcp>=1.11.0
11
+ Provides-Extra: examples
12
+ Requires-Dist: chromadb==1.1.0; extra == 'examples'
13
+ Requires-Dist: langchain-community==0.3.29; extra == 'examples'
14
+ Requires-Dist: langgraph==0.6.5; extra == 'examples'
15
+ Description-Content-Type: text/markdown
16
+
17
+ ## XGAE: Extreme General Agent Engine
18
+ ### Functional Features
19
+ - Support Custom prompt and external MCP Tools
20
+ - Support Langfuse
21
+ - Support Langgraph
22
+ - Support GAIA2 ARE agent
23
+ - Support Human-in-Loop in agent
24
+ - Can use the A2A protocol to call an A2A Agent as a tool via the 'xgaproxy' project
25
+ - Can use the E2B or Daytona Sandbox from the 'xgatools' project
26
+
27
+ ### Non-Functional Features
28
+ - Faster than the SUNA Engine
29
+ - Lighter architecture than the SUNA Engine
30
+ - Separate tools from Agent Engine
31
+
32
+ ### Examples
33
+ - langgraph: Build React mode Langgraph Agent by XGA Engine
34
+ - are: Build GAIA2 ARE Agent by XGA Engine
35
+ - engine: Use XGA Engine in various scenarios
36
+ - tools: Simulation tools for example and test
xgae-0.3.2/README.md ADDED
@@ -0,0 +1,20 @@
1
+ ## XGAE: Extreme General Agent Engine
2
+ ### Functional Features
3
+ - Support Custom prompt and external MCP Tools
4
+ - Support Langfuse
5
+ - Support Langgraph
6
+ - Support GAIA2 ARE agent
7
+ - Support Human-in-Loop in agent
8
+ - Can use the A2A protocol to call an A2A Agent as a tool via the 'xgaproxy' project
9
+ - Can use the E2B or Daytona Sandbox from the 'xgatools' project
10
+
11
+ ### Non-Functional Features
12
+ - Faster than the SUNA Engine
13
+ - Lighter architecture than the SUNA Engine
14
+ - Separate tools from Agent Engine
15
+
16
+ ### Examples
17
+ - langgraph: Build React mode Langgraph Agent by XGA Engine
18
+ - are: Build GAIA2 ARE Agent by XGA Engine
19
+ - engine: Use XGA Engine in various scenarios
20
+ - tools: Simulation tools for example and test
@@ -0,0 +1,30 @@
1
+ ## GAIA2 ARE Support
2
+ ### How to add XGAE to ARE Project
3
+ - add xgae==0.3.2 to ARE requirements.txt
4
+ - uv pip install -r requirements.txt
5
+ - modify the ARE 'AgentBuilder' and 'AgentConfigBuilder' classes to add an "xga" agent type
6
+ ```
7
+ File: agent_builder.py
8
+ class AgentBuilder:
9
+ def list_agents(self) -> list[str]:
10
+ return ["default", "xga"]
11
+
12
+ def build():
13
+ ...
14
+ agent_name = agent_config.get_agent_name()
15
+ if agent_name in ["default", "xga"]:
16
+ # add xga agent code
17
+
18
+ File: agent_config_builder.py
19
+ class AgentConfigBuilder:
20
+ def build():
21
+ if agent_name in ["default", "xga"]:
22
+ ```
23
+
24
+ - modify the ARE .env file to add the XGAE .env configuration (refer to env.example)
25
+ - copy XGAE package 'mcpservers' and 'templates' directory to ARE project root
26
+
27
+ ### Run XGA Agent in ARE
28
+ ```
29
+ uv run are-run -e -s scenario_find_image_file -a xga --model openai/qwen3-235b-a22b --provider llama-api --output_dir ./output
30
+ ```
@@ -0,0 +1,42 @@
1
+ # Certificate paths may vary on different types of machines.
2
+ ARE_SIMULATION_SSL_CERT_PATH=
3
+ ARE_SIMULATION_SSL_KEY_PATH=
4
+
5
+ # ARE_SIMULATION UI server.
6
+ ARE_SIMULATION_SERVER_HOSTNAME=localhost
7
+ ARE_SIMULATION_SERVER_PORT=8080
8
+
9
+ # ARE_SIMULATION UI client.
10
+ ARE_SIMULATION_CLIENT_HOSTNAME=localhost
11
+ ARE_SIMULATION_CLIENT_PORT=8088
12
+ ARE_SIMULATION_CLIENT_BACKEND_URL=https://${ARE_SIMULATION_SERVER_HOSTNAME}:${ARE_SIMULATION_SERVER_PORT}
13
+
14
+ # Scenario discovery
15
+ # Format: "dir1,dir2,dir3,..."
16
+ # Example: "/path/to/custom/scenarios,/another/path"
17
+ ARE_SCENARIO_DIRECTORIES="/Users/sharkystar/DevProjects/are"
18
+
19
+ # Models.
20
+ ALL_MODELS=""
21
+
22
+
23
+ DEMO_FS_PATH="hf://datasets/meta-agents-research-environments/gaia2_filesystem/demo_filesystem"
24
+
25
+
26
+ ### XGA ENV ###
27
+ # LOG
28
+ LOG_LEVEL=INFO
29
+ LOG_FILE=log/xgae.log
30
+ LOG_ENABLE=True
31
+
32
+ LLM_MAX_TOKENS=16384
33
+ LLM_TEMPERATURE=0
34
+ LLM_MAX_RETRIES=1
35
+ LLM_STREAM=True
36
+ LLM_ENABLE_THINKING=False
37
+
38
+ LLM_LANGFUSE_ENABLE=False
39
+
40
+
41
+ # TASK_ENGINE
42
+ USE_ASSISTANT_CHUNK_MSG=False
@@ -0,0 +1,289 @@
1
+ import re
2
+ from typing import List, Dict, Any
3
+ from typing_extensions import override
4
+
5
+ from are.simulation.agents.are_simulation_agent import AgentStoppedException
6
+ from are.simulation.agents.llm.types import MMObservation
7
+ from are.simulation.agents.multimodal import Attachment, attachments_to_pil
8
+ from are.simulation.agents.agent_log import (
9
+ LLMInputLog,
10
+ LLMOutputThoughtActionLog,
11
+ StepLog,
12
+ StopLog,
13
+ SystemPromptLog,
14
+ ThoughtLog,
15
+ ToolCallLog,
16
+ FinalAnswerLog
17
+ )
18
+ from are.simulation.agents.default_agent.base_agent import (
19
+ BaseAgent,
20
+ RunningState,
21
+ get_offset_from_time_config_mode
22
+ )
23
+
24
+ from xgae.utils.llm_client import LLMConfig
25
+ from xgae.gaia2.are_engine import ARETaskEngine
26
+ from agent.are.simulation.agents.xga.xga_tool_box import XGAAreToolBox
27
+
28
+
29
+ def pre_run_task_check(agent, iterations: int, llm_messages: List[Dict[str, Any]]):
30
+ try:
31
+ agent.logger.info(f"\n\n------ Starting Run Task Iteration {iterations}... ------")
32
+
33
+ if iterations == 0:
34
+ agent.system_prompt = agent.task_engine.task_prompt
35
+ agent.logger.info(f"------ SYSTEM_PROMPT ------ \n{agent.system_prompt}\n")
36
+ agent.append_agent_log(
37
+ SystemPromptLog(
38
+ content = agent.system_prompt,
39
+ timestamp = agent.make_timestamp(),
40
+ agent_id = agent.agent_id,
41
+ )
42
+ )
43
+
44
+ agent.append_agent_log(
45
+ StepLog(
46
+ iteration = agent.iterations,
47
+ timestamp = agent.make_timestamp(),
48
+ agent_id = agent.agent_id,
49
+ )
50
+ )
51
+
52
+ if agent.stop_event.is_set():
53
+ agent.logger.info(f"pre_run_task_check[{iterations}]: Recv Stop Event before condition, raise AgentStoppedException")
54
+ raise AgentStoppedException("Agent stopped.")
55
+
56
+ # Execute a pre_step() function if it exists
57
+ for conditional_step in agent.conditional_pre_steps:
58
+ if conditional_step.condition is None or conditional_step.condition(agent):
59
+ conditional_step.function(agent)
60
+
61
+ if agent.stop_event.is_set():
62
+ agent.logger.info(f"pre_run_task_check[{iterations}]: Recv Stop Event after condition, raise AgentStoppedException")
63
+ raise AgentStoppedException("Agent stopped.")
64
+
65
+ # Begin step()
66
+ agent.append_agent_log(
67
+ LLMInputLog(
68
+ content = llm_messages,
69
+ timestamp = agent.make_timestamp(),
70
+ agent_id = agent.agent_id
71
+ )
72
+ )
73
+
74
+ if agent.simulated_generation_time_config is not None:
75
+ if agent.pause_env is None:
76
+ raise ValueError("pause_env is not set")
77
+ agent.pause_env()
78
+ except Exception as e:
79
+ agent.log_error(e)
80
+ agent.logger.info(f"pre_run_task_check[{iterations}]: Exception Occur, Stop task")
81
+ agent.task_engine.stop_task()
82
+
83
+
84
+ def post_run_task_check(agent, iterations: int, llm_response: Dict[str, Any]):
85
+ try:
86
+ agent.logger.info(f"------ LLM Response Iteration [{iterations}] ------ \n{llm_response}\n")
87
+
88
+ llm_output = str(llm_response)
89
+
90
+ # Resume the environment after the generation of a thought/action if needed
91
+ if agent.simulated_generation_time_config is not None:
92
+ if agent.resume_env is None:
93
+ raise ValueError("resume_env is not set")
94
+
95
+ offset = get_offset_from_time_config_mode(
96
+ time_config = agent.simulated_generation_time_config,
97
+ completion_duration = 0,
98
+ )
99
+ agent.logger.info(f"post_run_task_check[{iterations}]: Resuming environment with {offset} offset")
100
+ agent.resume_env(offset)
101
+
102
+ metadata = {}
103
+ agent.append_agent_log(
104
+ LLMOutputThoughtActionLog(
105
+ content = llm_output,
106
+ timestamp = agent.make_timestamp(),
107
+ agent_id = agent.agent_id,
108
+ prompt_tokens = metadata.get("prompt_tokens", 0),
109
+ completion_tokens = metadata.get("completion_tokens", 0),
110
+ total_tokens = metadata.get("total_tokens", 0),
111
+ reasoning_tokens = metadata.get("reasoning_tokens", 0),
112
+ completion_duration = metadata.get("completion_duration", 0),
113
+ )
114
+ )
115
+ # end step()
116
+
117
+ if agent.stop_event.is_set():
118
+ agent.logger.info(f"post_run_task_check[{iterations}]: Recv Stop Event, raise AgentStoppedException")
119
+ raise AgentStoppedException("Agent stopped.")
120
+
121
+ # Execute a post_step() function if it exists (polling the Meta Agents Research Environments notifications for example)
122
+ for conditional_step in agent.conditional_post_steps:
123
+ if conditional_step.condition is None or conditional_step.condition(agent):
124
+ conditional_step.function(agent)
125
+ except Exception as e:
126
+ agent.log_error(e)
127
+ agent.logger.info(f"post_run_task_check[{iterations}]: Exception Occur, Stop task")
128
+ agent.task_engine.stop_task()
129
+ finally:
130
+ if agent.simulated_generation_time_config and agent.resume_env:
131
+ agent.resume_env(0.0) # Resume without advancing time
132
+
133
+ agent.iterations += 1
134
+ agent.planning_counter += 1
135
+
136
+
137
+ def terminate_task_check(agent, iterations: int) -> bool:
138
+ is_terminate_task = False
139
+
140
+ try:
141
+ if agent.termination_step.condition:
142
+ is_terminate_task = agent.termination_step.condition(agent)
143
+ if is_terminate_task:
144
+ agent.logger.info(f"terminate_task_check[{iterations}]: termination_step.condition is True")
145
+ except Exception as e:
146
+ agent.log_error(e)
147
+
148
+ return is_terminate_task
149
+
150
+
151
+ class XGAAreAgent(BaseAgent):
152
+ def __init__(self, **kwargs):
153
+ super().__init__(**kwargs)
154
+ self.task_engine: ARETaskEngine = None
155
+
156
+
157
+ @override
158
+ def initialize(self, attachments: list[Attachment] | None = None, **kwargs) -> None:
159
+ self.logs = []
160
+ self.iterations = 0
161
+ self.planning_counter = 0
162
+
163
+ tool_box = XGAAreToolBox(self.tools)
164
+
165
+ _system_prompt = "\n\n".join(prompt for prompt in self.init_system_prompts.values())
166
+ pattern = r'<general_instructions>(.*?)</general_instructions>'
167
+ prompt_are_general = re.search(pattern, _system_prompt, re.DOTALL)
168
+ if prompt_are_general:
169
+ prompt_are_general = prompt_are_general.group(1).strip()
170
+ else:
171
+ prompt_are_general = _system_prompt
172
+
173
+ model_config = self.llm_engine.model_config
174
+ llm_config = LLMConfig(
175
+ model = model_config.model_name,
176
+ api_key = model_config.api_key,
177
+ api_base = model_config.endpoint
178
+ )
179
+ self.task_engine = ARETaskEngine(
180
+ agent = self,
181
+ agent_id = self.agent_id,
182
+ system_prompt = prompt_are_general,
183
+ max_auto_run = self.max_iterations,
184
+ llm_config = llm_config,
185
+ tool_box = tool_box,
186
+ pre_run_task_fn = pre_run_task_check,
187
+ post_run_task_fn = post_run_task_check,
188
+ terminate_task_fn = terminate_task_check,
189
+ )
190
+
191
+ # Reload the agent state if logs are provided
192
+ start_logs = kwargs.pop("start_logs", [])
193
+ if start_logs:
194
+ self.replay(start_logs)
195
+
196
+ # Include additional image PILs directly into state stack.
197
+ if attachments:
198
+ images = attachments_to_pil(attachments)
199
+ self.action_executor.inject_state({f"image_{i}": image for i, image in enumerate(images)})
200
+ self.logger.debug(f"XGAAreAgent initialize: Injecting images into states for {len(images)} images")
201
+ self.logger.debug(f"XGAAreAgent initialize: New Keys {','.join(self.action_executor.state.keys())}")
202
+
203
+ self.initialized = True
204
+
205
+
206
+ @override
207
+ def execute_agent_loop(self) -> str | None | MMObservation:
208
+ import asyncio
209
+ asyncio.run(self.async_execute_agent_loop())
210
+
211
+ # We have reached a termination condition, execute the termination method
212
+ if self.termination_step.function is not None and not self.stop_event.is_set():
213
+ return self.termination_step.function(self)
214
+
215
+
216
+ async def async_execute_agent_loop(self) -> str | None | MMObservation:
217
+ chunks = []
218
+ async for chunk in self.task_engine.run_task(task_input={"role": "user", "content": self.task}):
219
+ chunks.append(chunk)
220
+ chunk_type = chunk['type']
221
+ if chunk_type== "status":
222
+ status_content = chunk['content']
223
+ status_type = status_content['status_type']
224
+ if status_type == "error":
225
+ error_msg = chunk.get('message')
226
+ self.logger.warning(f"XGAAreAgent execute_agent_loop: Fatal error - {error_msg}")
227
+ self.log_error(error_msg)
228
+ elif status_type == "stop":
229
+ error_msg = chunk.get('message')
230
+ self.logger.warning("XGAAreAgent execute_agent_loop: Agent stopped.")
231
+ self.append_agent_log(
232
+ StopLog(
233
+ content = f"Agent stopped - {error_msg}",
234
+ timestamp = self.make_timestamp(),
235
+ agent_id = self.agent_id,
236
+ )
237
+ )
238
+ elif status_type == "tool_started":
239
+ function_name = status_content['function_name']
240
+ self.append_agent_log(
241
+ ThoughtLog(
242
+ content = f"To complete task, should call '{function_name}' tool",
243
+ timestamp = self.make_timestamp(),
244
+ agent_id = self.agent_id,
245
+ )
246
+ )
247
+ elif chunk_type == "tool":
248
+ tool_content = chunk['content']
249
+ tool_execution = tool_content.get('tool_execution')
250
+ self.append_agent_log(
251
+ ToolCallLog(
252
+ tool_name = tool_execution.get('function_name'),
253
+ tool_arguments = tool_execution.get('arguments'),
254
+ timestamp = self.make_timestamp(),
255
+ agent_id = self.agent_id,
256
+ )
257
+ )
258
+ #print(chunk)
259
+
260
+ final_result = self.task_engine.parse_final_result(chunks)
261
+ print(f"\n\nFINAL_RESULT: {final_result}")
262
+
263
+ # Send Final Result to user
264
+ args = {
265
+ 'content': final_result['content']
266
+ }
267
+ self.tools['AgentUserInterface__send_message_to_user'](**args)
268
+ self.append_agent_log(
269
+ ToolCallLog(
270
+ tool_name = 'AgentUserInterface__send_message_to_user',
271
+ tool_arguments = args,
272
+ timestamp = self.make_timestamp(),
273
+ agent_id = self.agent_id,
274
+ )
275
+ )
276
+
277
+ # Return Final Result
278
+ if final_result['type'] == "error":
279
+ self.custom_state["running_state"] = RunningState.FAILED
280
+ else:
281
+ self.custom_state["running_state"] = RunningState.TERMINATED
282
+
283
+ self.append_agent_log(
284
+ FinalAnswerLog(
285
+ content = final_result['content'],
286
+ timestamp = self.make_timestamp(),
287
+ agent_id = self.agent_id
288
+ )
289
+ )
@@ -0,0 +1,26 @@
1
+ from are.simulation.agents.are_simulation_agent_config import ARESimulationReactBaseAgentConfig
2
+ from are.simulation.agents.default_agent.steps.are_simulation import get_are_simulation_update_pre_step
3
+ from are.simulation.agents.default_agent.termination_methods.are_simulation import get_gaia2_termination_step
4
+ from are.simulation.agents.default_agent.tools.json_action_executor import JsonActionExecutor
5
+ from are.simulation.agents.llm.llm_engine import LLMEngine
6
+
7
+ from agent.are.simulation.agents.xga.xga_agent import XGAAreAgent
8
+
9
+
10
+ def xga_simulation_react_xml_agent(
11
+ llm_engine: LLMEngine, base_agent_config: ARESimulationReactBaseAgentConfig
12
+ ):
13
+ return XGAAreAgent(
14
+ llm_engine=llm_engine,
15
+ tools={},
16
+ system_prompts={
17
+ "system_prompt": str(base_agent_config.system_prompt),
18
+ },
19
+ termination_step=get_gaia2_termination_step(),
20
+ max_iterations=base_agent_config.max_iterations,
21
+ action_executor=JsonActionExecutor( # Just for compatible BaseAgent, useless
22
+ use_custom_logger=base_agent_config.use_custom_logger
23
+ ),
24
+ conditional_pre_steps=[get_are_simulation_update_pre_step()],
25
+ use_custom_logger=base_agent_config.use_custom_logger,
26
+ )
@@ -0,0 +1,109 @@
1
+ import json
2
+ import logging
3
+
4
+ from typing import List, Any, Dict, Optional
5
+ from typing_extensions import override
6
+
7
+ from langchain_mcp_adapters.tools import load_mcp_tools
8
+
9
+ from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolResult, XGAToolType
10
+ from xgae.engine.mcp_tool_box import XGAMcpToolBox
11
+
12
+ from are.simulation.tools import Tool
13
+
14
+ class XGAAreToolBox(XGAMcpToolBox):
15
+ def __init__(self, are_tools: Dict[str, Tool]):
16
+ super().__init__()
17
+ self.are_tools = are_tools
18
+ self._is_loaded_are_tool_schemas = False
19
+
20
+
21
+ @override
22
+ async def init_tool_schemas(self):
23
+ await self._load_mcp_tools_schema()
24
+ self._load_are_tools_schema()
25
+
26
+
27
+ @override
28
+ async def call_tool(self, task_id: str, tool_name: str, args: Optional[Dict[str, Any]] = None) -> XGAToolResult:
29
+ if tool_name == "end_task":
30
+ server_name = self.GENERAL_MCP_SERVER_NAME
31
+ else:
32
+ task_tool_schemas = self.task_tool_schemas.get(task_id, {})
33
+ tool_schema = task_tool_schemas.get(tool_name, None)
34
+ if tool_schema is None:
35
+ raise XGAError(f"MCP tool not found: '{tool_name}'")
36
+ server_name = tool_schema.server_name
37
+
38
+ tool_type = self._get_tool_type(server_name)
39
+ if tool_type == "custom": # ARE Tools
40
+ full_tool_name = server_name + "__" + tool_name
41
+
42
+ try:
43
+ tool_result = self.are_tools[full_tool_name](**args)
44
+ result = XGAToolResult(success=True, output=str(tool_result))
45
+ except Exception as e:
46
+ error = f"Call ARE Tool '{tool_name}' error: {str(e)}"
47
+ logging.error(f"AreToolBox call_are_tool: {error}")
48
+ result = XGAToolResult(success=False, output=error)
49
+ else:
50
+ async with self._mcp_client.session(server_name) as session:
51
+ tools = await load_mcp_tools(session)
52
+ mcp_tool = next((t for t in tools if t.name == tool_name), None)
53
+
54
+ if mcp_tool:
55
+ tool_args = args or {}
56
+
57
+ if tool_type == "general" or tool_type == "agent":
58
+ tool_args = dict({'task_id': task_id}, **tool_args)
59
+
60
+ try:
61
+ tool_result = await mcp_tool.arun(tool_args)
62
+ if tool_type == "general":
63
+ tool_result = json.loads(tool_result)
64
+ result = XGAToolResult(success=tool_result['success'], output=str(tool_result['output']))
65
+ else:
66
+ result = XGAToolResult(success=True, output=str(tool_result))
67
+ except Exception as e:
68
+ error = f"Call MCP Tool '{tool_name}' error: {str(e)}"
69
+ logging.error(f"AreToolBox call_mcp_tool: {error}")
70
+ result = XGAToolResult(success=False, output=error)
71
+ else:
72
+ error = f"No MCP tool found with name: {tool_name}"
73
+ logging.info(f"AreToolBox call_mcp_tool: error={error}")
74
+ result = XGAToolResult(success=False, output=error)
75
+
76
+ return result
77
+
78
+
79
+ async def reload_mcp_tools_schema(self) -> None:
80
+ self._is_loaded_mcp_tool_schemas = False
81
+ self._is_loaded_are_tool_schemas = False
82
+ await self.init_tool_schemas()
83
+
84
+
85
+ def _load_are_tools_schema(self) -> None:
86
+ if not self._is_loaded_are_tool_schemas:
87
+ for are_tool in self.are_tools.values():
88
+ full_tool_name = are_tool.name
89
+ server_name , tool_name = full_tool_name.split("__")
90
+ tool_type :XGAToolType = "custom"
91
+ input_schema = {
92
+ 'properties': are_tool.inputs,
93
+ 'required': []
94
+ }
95
+
96
+ tool_schema = XGAToolSchema(tool_name = tool_name,
97
+ tool_type = tool_type,
98
+ server_name = server_name,
99
+ description = are_tool.description,
100
+ input_schema = input_schema,
101
+ metadata = {}
102
+ )
103
+ if server_name not in self.mcp_tool_schemas:
104
+ self.mcp_tool_schemas[server_name] = []
105
+ self.mcp_tool_schemas[server_name].append(tool_schema)
106
+ if server_name not in self.mcp_server_names:
107
+ self.mcp_server_names.append(server_name)
108
+
109
+ self._is_loaded_are_tool_schemas = True
@@ -1,6 +1,7 @@
1
1
  import logging
2
2
  import os
3
- from typing import override, List
3
+ from typing import List
4
+ from typing_extensions import override
4
5
 
5
6
  from langchain_core.documents import Document
6
7
  from langchain_core.embeddings import Embeddings
@@ -1,15 +1,15 @@
1
1
  [project]
2
2
  name = "xgae"
3
- version = "0.3.0"
3
+ version = "0.3.2"
4
4
  description = "Extreme General Agent Engine"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.11"
7
7
  dependencies = [
8
- "colorlog==6.9.0",
9
- "litellm==1.74.15",
10
- "mcp==1.13.0",
8
+ "colorlog>=6.9.0",
9
+ "litellm>=1.71.1",
10
+ "mcp>=1.11.0",
11
11
  "langfuse==2.60.9",
12
- "langchain-mcp-adapters==0.1.9",
12
+ "langchain-mcp-adapters>=0.1.9",
13
13
  ]
14
14
 
15
15
 
@@ -733,6 +733,19 @@ wheels = [
733
733
  { url = "https://pypi.tuna.tsinghua.edu.cn/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
734
734
  ]
735
735
 
736
+ [[package]]
737
+ name = "httpx-aiohttp"
738
+ version = "0.1.9"
739
+ source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
740
+ dependencies = [
741
+ { name = "aiohttp" },
742
+ { name = "httpx" },
743
+ ]
744
+ sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/d8/f2/9a86ce9bc48cf57dabb3a3160dfed26d8bbe5a2478a51f9d1dbf89f2f1fc/httpx_aiohttp-0.1.9.tar.gz", hash = "sha256:4ee8b22e6f2e7c80cd03be29eff98bfe7d89bd77f021ce0b578ee76b73b4bfe6", size = 206023, upload-time = "2025-10-15T08:52:57.475Z" }
745
+ wheels = [
746
+ { url = "https://pypi.tuna.tsinghua.edu.cn/packages/a1/db/5cfa8254a86c34a1ab7fe0dbec9f81bb5ebd831cbdd65aa4be4f37027804/httpx_aiohttp-0.1.9-py3-none-any.whl", hash = "sha256:3dc2845568b07742588710fcf3d72db2cbcdf2acc93376edf85f789c4d8e5fda", size = 6180, upload-time = "2025-10-15T08:52:56.521Z" },
747
+ ]
748
+
736
749
  [[package]]
737
750
  name = "httpx-sse"
738
751
  version = "0.4.1"
@@ -1125,12 +1138,13 @@ wheels = [
1125
1138
 
1126
1139
  [[package]]
1127
1140
  name = "litellm"
1128
- version = "1.74.15"
1141
+ version = "1.71.1"
1129
1142
  source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
1130
1143
  dependencies = [
1131
1144
  { name = "aiohttp" },
1132
1145
  { name = "click" },
1133
1146
  { name = "httpx" },
1147
+ { name = "httpx-aiohttp" },
1134
1148
  { name = "importlib-metadata" },
1135
1149
  { name = "jinja2" },
1136
1150
  { name = "jsonschema" },
@@ -1140,9 +1154,9 @@ dependencies = [
1140
1154
  { name = "tiktoken" },
1141
1155
  { name = "tokenizers" },
1142
1156
  ]
1143
- sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/4b/13/90865340f76d54c1c2709074f882bf8ef3be0ac612a2164da5eed07ebe11/litellm-1.74.15.tar.gz", hash = "sha256:530a4b3918c02f87079ca7efb77eaf13d31f281218863f65b2da4bd863790677", size = 9748743, upload-time = "2025-08-02T21:49:08.631Z" }
1157
+ sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/8d/f7/1d51b81e608543c6a79b275a4e2b43f6b4b7f32588566b303102014b725d/litellm-1.71.1.tar.gz", hash = "sha256:c20e5917fdbe771ba4b6d1862b3d38d6e89cfba53e85bb337013f848256566eb", size = 7935681, upload-time = "2025-05-25T14:24:10.744Z" }
1144
1158
  wheels = [
1145
- { url = "https://pypi.tuna.tsinghua.edu.cn/packages/b9/9c/f4dd62d790d65c924699450d8789df437e63c8d99229e95bd7467ce7b82a/litellm-1.74.15-py3-none-any.whl", hash = "sha256:bf3db744dee982e53196cb688097ba76e2867004e39ad201b3761c5de9e3265b", size = 8835438, upload-time = "2025-08-02T21:49:05.817Z" },
1159
+ { url = "https://pypi.tuna.tsinghua.edu.cn/packages/34/0e/328e077a66726dc1d4cfee37c67548b151a33a6d196a32737c85a034712b/litellm-1.71.1-py3-none-any.whl", hash = "sha256:9b94e250c58fba3c87c6ebb77e33c1cc8aa9110cee99dfdc37b368a11cec57c7", size = 7921387, upload-time = "2025-05-25T14:24:08.205Z" },
1146
1160
  ]
1147
1161
 
1148
1162
  [[package]]
@@ -1219,7 +1233,7 @@ wheels = [
1219
1233
 
1220
1234
  [[package]]
1221
1235
  name = "mcp"
1222
- version = "1.13.0"
1236
+ version = "1.11.0"
1223
1237
  source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
1224
1238
  dependencies = [
1225
1239
  { name = "anyio" },
@@ -1234,9 +1248,9 @@ dependencies = [
1234
1248
  { name = "starlette" },
1235
1249
  { name = "uvicorn", marker = "sys_platform != 'emscripten'" },
1236
1250
  ]
1237
- sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/d3/a8/564c094de5d6199f727f5d9f5672dbec3b00dfafd0f67bf52d995eaa5951/mcp-1.13.0.tar.gz", hash = "sha256:70452f56f74662a94eb72ac5feb93997b35995e389b3a3a574e078bed2aa9ab3", size = 434709, upload-time = "2025-08-14T15:03:58.58Z" }
1251
+ sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/3a/f5/9506eb5578d5bbe9819ee8ba3198d0ad0e2fbe3bab8b257e4131ceb7dfb6/mcp-1.11.0.tar.gz", hash = "sha256:49a213df56bb9472ff83b3132a4825f5c8f5b120a90246f08b0dac6bedac44c8", size = 406907, upload-time = "2025-07-10T16:41:09.388Z" }
1238
1252
  wheels = [
1239
- { url = "https://pypi.tuna.tsinghua.edu.cn/packages/8b/6b/46b8bcefc2ee9e2d2e8d2bd25f1c2512f5a879fac4619d716b194d6e7ccc/mcp-1.13.0-py3-none-any.whl", hash = "sha256:8b1a002ebe6e17e894ec74d1943cc09aa9d23cb931bf58d49ab2e9fa6bb17e4b", size = 160226, upload-time = "2025-08-14T15:03:56.641Z" },
1253
+ { url = "https://pypi.tuna.tsinghua.edu.cn/packages/92/9c/c9ca79f9c512e4113a5d07043013110bb3369fc7770040c61378c7fbcf70/mcp-1.11.0-py3-none-any.whl", hash = "sha256:58deac37f7483e4b338524b98bc949b7c2b7c33d978f5fafab5bde041c5e2595", size = 155880, upload-time = "2025-07-10T16:41:07.935Z" },
1240
1254
  ]
1241
1255
 
1242
1256
  [[package]]
@@ -2994,7 +3008,7 @@ wheels = [
2994
3008
 
2995
3009
  [[package]]
2996
3010
  name = "xgae"
2997
- version = "0.3.0"
3011
+ version = "0.3.2"
2998
3012
  source = { editable = "." }
2999
3013
  dependencies = [
3000
3014
  { name = "colorlog" },
@@ -3014,13 +3028,13 @@ examples = [
3014
3028
  [package.metadata]
3015
3029
  requires-dist = [
3016
3030
  { name = "chromadb", marker = "extra == 'examples'", specifier = "==1.1.0" },
3017
- { name = "colorlog", specifier = "==6.9.0" },
3031
+ { name = "colorlog", specifier = ">=6.9.0" },
3018
3032
  { name = "langchain-community", marker = "extra == 'examples'", specifier = "==0.3.29" },
3019
- { name = "langchain-mcp-adapters", specifier = "==0.1.9" },
3033
+ { name = "langchain-mcp-adapters", specifier = ">=0.1.9" },
3020
3034
  { name = "langfuse", specifier = "==2.60.9" },
3021
3035
  { name = "langgraph", marker = "extra == 'examples'", specifier = "==0.6.5" },
3022
- { name = "litellm", specifier = "==1.74.15" },
3023
- { name = "mcp", specifier = "==1.13.0" },
3036
+ { name = "litellm", specifier = ">=1.71.1" },
3037
+ { name = "mcp", specifier = ">=1.11.0" },
3024
3038
  ]
3025
3039
  provides-extras = ["examples"]
3026
3040
 
@@ -6,7 +6,7 @@ class XGAError(Exception):
6
6
  """Custom exception for errors in the XGA system."""
7
7
  pass
8
8
 
9
- XGAMsgStatusType = Literal["error", "finish", "tool_started", "tool_completed", "tool_error", "tool_failed"]
9
+ XGAMsgStatusType = Literal["error", "stop", "finish", "tool_started", "tool_completed", "tool_error", "tool_failed"]
10
10
  XGAResponseMsgType = Literal["user", "status", "tool", "assistant", "assistant_chunk"]
11
11
 
12
12
  class XGAResponseMessage(TypedDict, total=False):
@@ -40,6 +40,10 @@ class XGAToolResult:
40
40
 
41
41
 
42
42
  class XGAToolBox(ABC):
43
+ @abstractmethod
44
+ async def init_tool_schemas(self):
45
+ pass
46
+
43
47
  @abstractmethod
44
48
  async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
45
49
  pass
@@ -2,7 +2,8 @@ import json
2
2
  import logging
3
3
  import os
4
4
 
5
- from typing import List, Any, Dict, Optional, Literal, override
5
+ from typing import List, Any, Dict, Optional, Literal
6
+ from typing_extensions import override
6
7
 
7
8
  from langchain_mcp_adapters.client import MultiServerMCPClient
8
9
  from langchain_mcp_adapters.tools import load_mcp_tools
@@ -33,7 +34,11 @@ class XGAMcpToolBox(XGAToolBox):
33
34
  self.mcp_tool_schemas: Dict[str, List[XGAToolSchema]] = {}
34
35
  self.task_tool_schemas: Dict[str, Dict[str,XGAToolSchema]] = {}
35
36
 
36
- self.is_loaded_tool_schemas = False
37
+ self._is_loaded_mcp_tool_schemas = False
38
+
39
+ @override
40
+ async def init_tool_schemas(self):
41
+ await self._load_mcp_tools_schema()
37
42
 
38
43
  @override
39
44
  async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
@@ -139,8 +144,8 @@ class XGAMcpToolBox(XGAToolBox):
139
144
  return result
140
145
 
141
146
 
142
- async def load_mcp_tools_schema(self)-> None:
143
- if not self.is_loaded_tool_schemas:
147
+ async def _load_mcp_tools_schema(self)-> None:
148
+ if not self._is_loaded_mcp_tool_schemas:
144
149
  for server_name in self.mcp_server_names:
145
150
  self.mcp_tool_schemas[server_name] = []
146
151
  try:
@@ -162,18 +167,21 @@ class XGAMcpToolBox(XGAToolBox):
162
167
  param_properties.pop('title', None)
163
168
 
164
169
  metadata = tool.metadata or {}
165
- tool_schema = XGAToolSchema(tool_name=tool.name,
166
- tool_type=tool_type,
167
- server_name=server_name,
168
- description=tool.description,
169
- input_schema=input_schema,
170
- metadata=metadata)
170
+ tool_schema = XGAToolSchema(
171
+ tool_name = tool.name,
172
+ tool_type = tool_type,
173
+ server_name = server_name,
174
+ description = tool.description,
175
+ input_schema = input_schema,
176
+ metadata = metadata
177
+ )
171
178
  self.mcp_tool_schemas[server_name].append(tool_schema)
172
- self.is_loaded_tool_schemas = True
179
+
180
+ self._is_loaded_mcp_tool_schemas = True
173
181
 
174
182
  async def reload_mcp_tools_schema(self) -> None:
175
- self.is_loaded_tool_schemas = False
176
- await self.load_mcp_tools_schema()
183
+ self._is_loaded_mcp_tool_schemas = False
184
+ await self.init_tool_schemas()
177
185
 
178
186
 
179
187
  def _load_mcp_servers_config(self, mcp_config_path: str) -> Dict[str, Any]:
@@ -219,7 +227,7 @@ if __name__ == "__main__":
219
227
  #mcp_tool_box = XGAMcpToolBox()
220
228
 
221
229
  task_id = "task1"
222
- await mcp_tool_box.load_mcp_tools_schema()
230
+ await mcp_tool_box.init_tool_schemas()
223
231
  await mcp_tool_box.creat_task_tool_box(task_id=task_id, general_tools=["*"], custom_tools=["*"])
224
232
  tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "general")
225
233
  print("general_tools_schemas" + "*"*50)
@@ -83,9 +83,10 @@ class XGAPromptBuilder():
83
83
  tool_info = ""
84
84
  for tool_schema in tool_schemas:
85
85
  description = tool_schema.description if tool_schema.description else 'No description available'
86
- tool_info += f"- **{tool_schema.tool_name}**: {description}\n"
86
+ tool_info += f"- {tool_schema.tool_name}: {description}\n"
87
87
  parameters = tool_schema.input_schema.get('properties', {})
88
- tool_info += f" Parameters: {parameters}\n"
88
+ tool_info += f" Parameters: {parameters}\n"
89
+ tool_info += "\n"
89
90
  tool_prompt = tool_prompt.replace("{tool_schemas}", tool_info)
90
91
 
91
92
  return tool_prompt
@@ -1,6 +1,7 @@
1
1
  import logging
2
2
 
3
- from typing import List, Dict, Any, AsyncGenerator, override,Optional
3
+ from typing import List, Dict, Any, AsyncGenerator,Optional
4
+ from typing_extensions import override
4
5
 
5
6
  from xgae.utils import log_trace
6
7
 
@@ -1,6 +1,7 @@
1
1
  import logging
2
- import asyncio
3
- from typing import List, Dict, Any, Optional, AsyncGenerator, override
2
+
3
+ from typing import List, Dict, Any, Optional, AsyncGenerator
4
+ from typing_extensions import override
4
5
 
5
6
  from xgae.utils import log_trace
6
7
 
@@ -116,8 +116,7 @@ class XGATaskEngine:
116
116
  general_tools.append("ask")
117
117
 
118
118
  custom_tools = self.custom_tools or []
119
- if isinstance(self.tool_box, XGAMcpToolBox):
120
- await self.tool_box.load_mcp_tools_schema()
119
+ await self.tool_box.init_tool_schemas()
121
120
 
122
121
  await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
123
122
  general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general")
@@ -201,6 +200,9 @@ class XGATaskEngine:
201
200
  yield error_msg
202
201
  finally:
203
202
  if not self.running_task_checkpoint("termination_check", iterations):
203
+ status_content = {'status_type': "stop", 'role': "system", 'message': "Task is termiated by Stop Command"}
204
+ error_msg = self.add_response_message(type="status", content=status_content, is_llm_message=False)
205
+ yield error_msg
204
206
  break
205
207
 
206
208
  async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
@@ -239,8 +241,8 @@ class XGATaskEngine:
239
241
  reverse_chunks = reversed(chunks)
240
242
  chunk = None
241
243
 
242
- if self.terminate_task:
243
- return XGATaskResult(type="error", content="LLM Task is terminated !")
244
+ # if self.terminate_task:
245
+ # return XGATaskResult(type="error", content="LLM Task is terminated !")
244
246
 
245
247
  try:
246
248
  finish_reason = ''
@@ -249,7 +251,7 @@ class XGATaskEngine:
249
251
  if chunk_type == "status":
250
252
  status_content = chunk['content']
251
253
  status_type = status_content['status_type']
252
- if status_type == "error":
254
+ if status_type == "error" or status_type == "stop":
253
255
  error = status_content['message']
254
256
  final_result = XGATaskResult(type="error", content=error)
255
257
  elif status_type == "finish":
@@ -1,5 +1,6 @@
1
1
  import logging
2
- from typing import Any, Optional, override, Callable, Literal, Dict, List
2
+ from typing import Any, Optional, Callable, Literal, Dict, List
3
+ from typing_extensions import override
3
4
 
4
5
  from xgae.engine.engine_base import XGAToolBox
5
6
  from xgae.engine.task_engine import XGATaskEngine
@@ -81,12 +82,12 @@ if __name__ == "__main__":
81
82
 
82
83
  def terminate_task(agent, iterations: int) -> bool:
83
84
  logging.info(f"terminate_task: iterations={iterations}")
84
- return iterations > 6 # can test terminate by > 3
85
+ return iterations > 3 # can test terminate by > 3
85
86
 
86
87
 
87
88
  async def main():
88
89
  # Before Run Exec: uv run example-fault-tools
89
- # LLAMA_API_KEY ,
90
+ # LLAMA_API_KEY , LLAMA_API_BASE
90
91
  tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
91
92
  system_prompt = read_file("templates/example/fault_user_prompt.txt")
92
93
  llm_config = LLMConfig(
@@ -51,6 +51,9 @@ class LLMClient:
51
51
  self._init_langfuse()
52
52
 
53
53
  llm_config = llm_config or LLMConfig()
54
+ if llm_config.get('model') and llm_config.get('model_name') is None:
55
+ llm_config['model_name'] = llm_config.get('model')
56
+
54
57
  self.max_retries = int(os.getenv('LLM_MAX_RETRIES', 1))
55
58
 
56
59
  env_llm_model = os.getenv('LLM_MODEL', "openai/qwen3-235b-a22b")
xgae-0.3.0/PKG-INFO DELETED
@@ -1,14 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: xgae
3
- Version: 0.3.0
4
- Summary: Extreme General Agent Engine
5
- Requires-Python: >=3.11
6
- Requires-Dist: colorlog==6.9.0
7
- Requires-Dist: langchain-mcp-adapters==0.1.9
8
- Requires-Dist: langfuse==2.60.9
9
- Requires-Dist: litellm==1.74.15
10
- Requires-Dist: mcp==1.13.0
11
- Provides-Extra: examples
12
- Requires-Dist: chromadb==1.1.0; extra == 'examples'
13
- Requires-Dist: langchain-community==0.3.29; extra == 'examples'
14
- Requires-Dist: langgraph==0.6.5; extra == 'examples'
xgae-0.3.0/README.md DELETED
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes