xgae 0.1.12__tar.gz → 0.1.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic. Click here for more details.

Files changed (53) hide show
  1. {xgae-0.1.12 → xgae-0.1.15}/.env +3 -1
  2. {xgae-0.1.12 → xgae-0.1.15}/PKG-INFO +1 -1
  3. {xgae-0.1.12 → xgae-0.1.15}/pyproject.toml +3 -3
  4. xgae-0.1.15/release.md +12 -0
  5. {xgae-0.1.12 → xgae-0.1.15}/src/examples/agent/langgraph/react/react_agent.py +3 -4
  6. xgae-0.1.15/src/examples/engine/run_general_tools.py +16 -0
  7. {xgae-0.1.12 → xgae-0.1.15}/src/examples/engine/run_human_in_loop.py +1 -1
  8. {xgae-0.1.12 → xgae-0.1.15}/src/examples/engine/run_user_prompt.py +1 -1
  9. {xgae-0.1.12 → xgae-0.1.15}/src/examples/tools/custom_fault_tools_app.py +2 -1
  10. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/engine/mcp_tool_box.py +15 -9
  11. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/engine/prompt_builder.py +9 -8
  12. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/engine/responser/non_stream_responser.py +16 -17
  13. xgae-0.1.15/src/xgae/engine/responser/responser_base.py +529 -0
  14. xgae-0.1.15/src/xgae/engine/responser/stream_responser.py +256 -0
  15. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/engine/task_engine.py +106 -110
  16. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/engine/task_langfuse.py +11 -11
  17. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/tools/without_general_tools_app.py +3 -2
  18. xgae-0.1.15/src/xgae/utils/__init__.py +27 -0
  19. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/utils/json_helpers.py +0 -29
  20. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/utils/llm_client.py +23 -23
  21. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/utils/misc.py +1 -2
  22. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/utils/xml_tool_parser.py +7 -7
  23. {xgae-0.1.12 → xgae-0.1.15}/uv.lock +1 -1
  24. xgae-0.1.12/.idea/.gitignore +0 -8
  25. xgae-0.1.12/.idea/ai_toolkit.xml +0 -6
  26. xgae-0.1.12/.idea/inspectionProfiles/Project_Default.xml +0 -15
  27. xgae-0.1.12/.idea/inspectionProfiles/profiles_settings.xml +0 -6
  28. xgae-0.1.12/.idea/misc.xml +0 -7
  29. xgae-0.1.12/.idea/modules.xml +0 -8
  30. xgae-0.1.12/.idea/vcs.xml +0 -4
  31. xgae-0.1.12/.idea/workspace.xml +0 -225
  32. xgae-0.1.12/.idea/xgae.iml +0 -11
  33. xgae-0.1.12/src/xgae/engine/responser/responser_base.py +0 -616
  34. xgae-0.1.12/src/xgae/engine/responser/stream_responser.py +0 -138
  35. xgae-0.1.12/src/xgae/utils/__init__.py +0 -15
  36. {xgae-0.1.12 → xgae-0.1.15}/.python-version +0 -0
  37. {xgae-0.1.12 → xgae-0.1.15}/README.md +0 -0
  38. {xgae-0.1.12 → xgae-0.1.15}/mcpservers/custom_servers.json +0 -0
  39. {xgae-0.1.12 → xgae-0.1.15}/mcpservers/xga_server.json +0 -0
  40. {xgae-0.1.12 → xgae-0.1.15}/mcpservers/xga_server_sse.json +0 -0
  41. {xgae-0.1.12 → xgae-0.1.15}/src/examples/engine/run_simple.py +0 -0
  42. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/__init__.py +0 -0
  43. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/cli_app.py +0 -0
  44. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/engine/engine_base.py +0 -0
  45. {xgae-0.1.12 → xgae-0.1.15}/src/xgae/utils/setup_env.py +0 -0
  46. {xgae-0.1.12 → xgae-0.1.15}/templates/custom_tool_prompt_template.txt +0 -0
  47. {xgae-0.1.12 → xgae-0.1.15}/templates/example/fault_user_prompt.txt +0 -0
  48. {xgae-0.1.12 → xgae-0.1.15}/templates/gemini_system_prompt_template.txt +0 -0
  49. {xgae-0.1.12 → xgae-0.1.15}/templates/general_tool_prompt_template.txt +0 -0
  50. {xgae-0.1.12 → xgae-0.1.15}/templates/system_prompt_response_sample.txt +0 -0
  51. {xgae-0.1.12 → xgae-0.1.15}/templates/system_prompt_template.txt +0 -0
  52. {xgae-0.1.12 → xgae-0.1.15}/test/test_langfuse.py +0 -0
  53. {xgae-0.1.12 → xgae-0.1.15}/test/test_litellm_langfuse.py +0 -0
@@ -3,17 +3,19 @@ LOG_LEVEL=DEBUG
3
3
  LOG_FILE=log/xgae.log
4
4
  LOG_ENABLE=True
5
5
 
6
+
6
7
  # LANGFUSE
7
8
  LANGFUSE_PUBLIC_KEY=
8
9
  LANGFUSE_SECRET_KEY=
9
10
  LANGFUSE_HOST=https://cloud.langfuse.com
10
11
 
12
+
11
13
  # LLM
12
14
  LLM_MODEL=openai/qwen3-235b-a22b
13
15
  LLM_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
14
16
  LLM_API_KEY=
15
17
  LLM_MAX_TOKENS=16384
16
- LLM_TEMPERATURE=0.7
18
+ LLM_TEMPERATURE=0
17
19
  LLM_MAX_RETRIES=2
18
20
  LLM_STREAM=True
19
21
  LLM_ENABLE_THINKING=False
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: xgae
3
- Version: 0.1.12
3
+ Version: 0.1.15
4
4
  Summary: Extreme General Agent Engine
5
5
  Requires-Python: >=3.13
6
6
  Requires-Dist: colorlog==6.9.0
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "xgae"
3
- version = "0.1.12"
3
+ version = "0.1.15"
4
4
  description = "Extreme General Agent Engine"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.13"
@@ -18,9 +18,9 @@ requires = ["hatchling"]
18
18
  build-backend = "hatchling.build"
19
19
 
20
20
  [tool.hatch.build]
21
- exclude = ["log/*"]
21
+ exclude = ["log/*", ".idea/*"]
22
22
 
23
23
  [project.scripts]
24
24
  xgae = "xgae.cli_app:main"
25
25
  xgae-tools = "xgae.tools.without_general_tools_app:main"
26
- custom_fault_tools = "examples.tools.custom_fault_tools_app:main"
26
+ example-fault-tools = "examples.tools.custom_fault_tools_app:main"
xgae-0.1.15/release.md ADDED
@@ -0,0 +1,12 @@
1
+ # Release Changelog
2
+ ## [0.1.15] - 2025-9-1
3
+ ### Target
4
+ - Saved for StreamResponser tool_exec_on_stream mode, next release will be abolished
5
+ ### Changed
6
+ - Improve French translation (#377).
7
+ ### Fixed
8
+ - Fix finish_reason judge logic
9
+
10
+ ## [0.1.15] - 2025-8-31
11
+ ### Target
12
+ - First complete version
@@ -10,7 +10,7 @@ from langgraph.graph.message import add_messages
10
10
  from xgae.engine.engine_base import XGATaskResult, XGAResponseMessage
11
11
  from xgae.engine.mcp_tool_box import XGAMcpToolBox
12
12
  from xgae.utils.setup_env import setup_langfuse, setup_logging
13
- from xgae.utils import handle_error
13
+ from xgae.utils import log_trace
14
14
  from xgae.utils.misc import read_file
15
15
  from xgae.engine.task_engine import XGATaskEngine
16
16
 
@@ -183,9 +183,8 @@ class XGAReactAgent:
183
183
 
184
184
  return result
185
185
  except Exception as e:
186
- logging.error("### Error ManagerAgent _agent_work for user_input '%s': %s ###", user_input, str(e))
187
- handle_error(e)
188
- result = XGATaskResult(type="error", content="Never get result, Unexpected Error")
186
+ log_trace(e, f"XReactAgent generate: user_input={user_input}")
187
+ result = XGATaskResult(type="error", content=f"React Agent error: {e}")
189
188
  return result
190
189
 
191
190
 
@@ -0,0 +1,16 @@
1
+ import asyncio
2
+
3
+ from xgae.engine.task_engine import XGATaskEngine
4
+ from xgae.utils.setup_env import setup_logging
5
+
6
+
7
+ setup_logging()
8
+
9
+ async def main() -> None:
10
+ engine = XGATaskEngine(general_tools=["*"])
11
+
12
+ user_input = "This week's gold price"
13
+ final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": user_input})
14
+ print("FINAL RESULT:", final_result)
15
+
16
+ asyncio.run(main())
@@ -11,7 +11,7 @@ setup_logging()
11
11
  langfuse = setup_langfuse()
12
12
 
13
13
  async def main() -> None:
14
- # Before Run Exec: uv run custom_fault_tools
14
+ # Before Run Exec: uv run example-fault-tools
15
15
  tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
16
16
  system_prompt = read_file("templates/example/fault_user_prompt.txt")
17
17
 
@@ -9,7 +9,7 @@ from xgae.utils.setup_env import setup_logging
9
9
  setup_logging(log_level="ERROR")
10
10
 
11
11
  async def main() -> None:
12
- # Before Run Exec: uv run custom_fault_tools
12
+ # Before Run Exec: uv run example-fault-tools
13
13
  tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
14
14
  system_prompt = read_file("templates/example/fault_user_prompt.txt")
15
15
 
@@ -89,7 +89,8 @@ def main(transport: str, host: str, port: int, alarmtype:int):
89
89
  if transport != "stdio":
90
90
  from xgae.utils.setup_env import setup_logging
91
91
  setup_logging()
92
- logging.info("=" * 20 + f" Custom Fault Tools Sever Started in {transport} mode " + "=" * 20)
92
+ logging.info("=" * 10 + f" Example Fault Tools Server Started " + "=" * 10)
93
+ logging.info(f"=== transport={transport}, host={host}, port={port}, alarmtype={alarmtype}")
93
94
 
94
95
  global alarm_type
95
96
  alarm_type = alarmtype
@@ -17,16 +17,17 @@ class XGAMcpToolBox(XGAToolBox):
17
17
  custom_mcp_server_config: Optional[Dict[str, Any]] = None
18
18
  ):
19
19
  general_mcp_server_config = self._load_mcp_servers_config("mcpservers/xga_server.json")
20
- tool_box_mcp_server_config = general_mcp_server_config.get("mcpServers", {})
20
+ tool_box_mcp_server_config = general_mcp_server_config.get('mcpServers', {})
21
21
 
22
22
  if custom_mcp_server_config:
23
23
  tool_box_mcp_server_config.update(custom_mcp_server_config)
24
24
  elif custom_mcp_server_file:
25
25
  custom_mcp_server_config = self._load_mcp_servers_config(custom_mcp_server_file)
26
- custom_mcp_server_config = custom_mcp_server_config.get("mcpServers", {})
26
+ custom_mcp_server_config = custom_mcp_server_config.get('mcpServers', {})
27
27
  tool_box_mcp_server_config.update(custom_mcp_server_config)
28
28
 
29
29
  self._mcp_client = MultiServerMCPClient(tool_box_mcp_server_config)
30
+
30
31
  self.mcp_server_names: List[str] = [server_name for server_name in tool_box_mcp_server_config]
31
32
  self.mcp_tool_schemas: Dict[str, List[XGAToolSchema]] = {}
32
33
  self.task_tool_schemas: Dict[str, Dict[str,XGAToolSchema]] = {}
@@ -113,15 +114,20 @@ class XGAMcpToolBox(XGAToolBox):
113
114
  async with self._mcp_client.session(server_name) as session:
114
115
  tools = await load_mcp_tools(session)
115
116
  mcp_tool = next((t for t in tools if t.name == tool_name), None)
116
-
117
+ is_general_tool = False
117
118
  if mcp_tool:
118
119
  tool_args = args or {}
119
120
  if server_name == self.GENERAL_MCP_SERVER_NAME:
120
121
  tool_args = dict({"task_id": task_id}, **tool_args)
122
+ is_general_tool = True
121
123
 
122
124
  try:
123
125
  tool_result = await mcp_tool.arun(tool_args)
124
- result = XGAToolResult(success=True, output=str(tool_result))
126
+ if is_general_tool:
127
+ tool_result = json.loads(tool_result)
128
+ result = XGAToolResult(success=tool_result['success'], output=str(tool_result['output']))
129
+ else:
130
+ result = XGAToolResult(success=True, output=str(tool_result))
125
131
  except Exception as e:
126
132
  error = f"Call mcp tool '{tool_name}' error: {str(e)}"
127
133
  logging.error(f"McpToolBox call_tool: {error}")
@@ -173,18 +179,18 @@ class XGAMcpToolBox(XGAToolBox):
173
179
  for server_name, server_info in server_config["mcpServers"].items():
174
180
  if "transport" not in server_info:
175
181
  if "url" in server_info:
176
- server_info["transport"] = "streamable_http" if "mcp" in server_info["url"] else "sse"
182
+ server_info['transport'] = "streamable_http" if "mcp" in server_info['url'] else "sse"
177
183
  else:
178
- server_info["transport"] = "stdio"
184
+ server_info['transport'] = "stdio"
179
185
 
180
186
  return server_config
181
187
  else:
182
188
  logging.warning(f"McpToolBox load_mcp_servers_config: MCP servers config file not found at: {mcp_config_path}")
183
- return {"mcpServers": {}}
189
+ return {'mcpServers': {}}
184
190
 
185
191
  except Exception as e:
186
192
  logging.error(f"McpToolBox load_mcp_servers_config: Failed to load MCP servers config: {e}")
187
- return {"mcpServers": {}}
193
+ return {'mcpServers': {}}
188
194
 
189
195
 
190
196
  if __name__ == "__main__":
@@ -195,7 +201,7 @@ if __name__ == "__main__":
195
201
  setup_logging()
196
202
 
197
203
  async def main():
198
- ## Before Run Exec: uv run custom_fault_tools
204
+ ## Before Run Exec: uv run example-fault-tools
199
205
  mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
200
206
  #mcp_tool_box = XGAMcpToolBox()
201
207
 
@@ -34,19 +34,20 @@ class XGAPromptBuilder():
34
34
  openai_schemas = []
35
35
  for tool_schema in tool_schemas:
36
36
  openai_schema = {}
37
- openai_schema["type"] = "function"
38
37
  openai_function = {}
39
- openai_schema["function"] = openai_function
38
+ openai_schema['type'] = "function"
39
+ openai_schema['function'] = openai_function
40
40
 
41
- openai_function["name"] = tool_schema.tool_name
42
- openai_function["description"] = tool_schema.description if tool_schema.description else 'No description available'
41
+ openai_function['name'] = tool_schema.tool_name
42
+ openai_function['description'] = tool_schema.description if tool_schema.description else 'No description available'
43
43
 
44
44
  openai_parameters = {}
45
+ openai_function['parameters'] = openai_parameters
46
+
45
47
  input_schema = tool_schema.input_schema
46
- openai_function["parameters"] = openai_parameters
47
- openai_parameters["type"] = input_schema["type"]
48
- openai_parameters["properties"] = input_schema.get("properties", {})
49
- openai_parameters["required"] = input_schema["required"]
48
+ openai_parameters['type'] = input_schema['type']
49
+ openai_parameters['properties'] = input_schema.get('properties', {})
50
+ openai_parameters['required'] = input_schema['required']
50
51
 
51
52
  openai_schemas.append(openai_schema)
52
53
 
@@ -2,8 +2,8 @@ import logging
2
2
 
3
3
  from typing import List, Dict, Any, AsyncGenerator, override,Optional
4
4
 
5
- from xgae.utils import handle_error
6
- from xgae.utils.json_helpers import format_for_yield
5
+ from xgae.utils import log_trace
6
+
7
7
 
8
8
  from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
9
9
 
@@ -25,7 +25,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
25
25
  try:
26
26
  if hasattr(llm_response, 'choices') and llm_response.choices:
27
27
  if hasattr(llm_response.choices[0], 'finish_reason'):
28
- finish_reason = llm_response.choices[0].finish_reason
28
+ finish_reason = llm_response.choices[0].finish_reason # LLM finish reason: 'stop', 'length'
29
29
  logging.info(f"NonStreamResp: LLM response finish_reason={finish_reason}")
30
30
 
31
31
  response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
@@ -35,12 +35,13 @@ class NonStreamTaskResponser(TaskResponseProcessor):
35
35
 
36
36
  parsed_xml_data = self._parse_xml_tool_calls(llm_content)
37
37
  if self.max_xml_tool_calls > 0 and len(parsed_xml_data) > self.max_xml_tool_calls:
38
- logging.warning(f"NonStreamResp: Truncate content, parsed_xml_data length={len(parsed_xml_data)} limit over max_xml_tool_calls={self.max_xml_tool_calls}")
38
+ logging.warning(f"NonStreamResp: Over XML Tool Limit, finish_reason='xml_tool_limit_reached', "
39
+ f"parsed_xml_data_len={len(parsed_xml_data)}")
39
40
  parsed_xml_data = parsed_xml_data[:self.max_xml_tool_calls]
40
41
  finish_reason = "xml_tool_limit_reached"
41
42
 
42
43
  self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
43
- status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}, "
44
+ status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_exec_strategy}, "
44
45
  f"parsed_xml_data_len={len(parsed_xml_data)}, llm_content_len={len(llm_content)}")
45
46
 
46
47
  if len(llm_content) == 0:
@@ -52,7 +53,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
52
53
 
53
54
  tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
54
55
  if len(tool_calls_to_execute) > 0:
55
- tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
56
+ tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_exec_strategy)
56
57
 
57
58
  tool_index = 0
58
59
  for i, (returned_tool_call, tool_result) in enumerate(tool_results):
@@ -61,20 +62,19 @@ class NonStreamTaskResponser(TaskResponseProcessor):
61
62
  parsing_details = parsed_xml_item['parsing_details']
62
63
  assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
63
64
 
64
- tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details)
65
- tool_context.result = tool_result
65
+ tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details, tool_result)
66
66
 
67
67
  tool_start_msg = self._add_tool_start_message(tool_context)
68
- yield format_for_yield(tool_start_msg)
68
+ yield tool_start_msg
69
69
 
70
70
  tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy, assistant_msg_id, parsing_details)
71
71
 
72
72
  tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
73
- yield format_for_yield(tool_completed_msg)
73
+ yield tool_completed_msg
74
74
 
75
- yield format_for_yield(tool_message)
75
+ yield tool_message
76
76
 
77
- if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
77
+ if tool_context.function_name in ['ask', 'complete']:
78
78
  finish_reason = "completed"
79
79
  break
80
80
 
@@ -86,17 +86,16 @@ class NonStreamTaskResponser(TaskResponseProcessor):
86
86
  if finish_reason:
87
87
  finish_content = {"status_type": "finish", "finish_reason": finish_reason}
88
88
  finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
89
- yield format_for_yield(finish_msg)
89
+ yield finish_msg
90
90
  except Exception as e:
91
- logging.error(f"NonStreamResp: Process response llm_content: {llm_content}")
92
- handle_error(e)
91
+ trace = log_trace(e, f"NonStreamResp: Process response llm_content:\n {llm_content}")
93
92
  self.root_span.event(name="non_stream_process_response_error", level="ERROR",
94
93
  status_message=f"Process non-streaming response error: {e}",
95
- metadata={"content": llm_content})
94
+ metadata={"content": llm_content, "trace": trace})
96
95
 
97
96
  content = {"role": "system", "status_type": "error", "message": f"Process non-streaming response error: {e}"}
98
97
  error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
99
- yield format_for_yield(error_msg)
98
+ yield error_msg
100
99
 
101
100
  raise # Use bare 'raise' to preserve the original exception with its traceback
102
101