xgae 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release. This version of xgae might be problematic.

@@ -17,16 +17,17 @@ class XGAMcpToolBox(XGAToolBox):
         custom_mcp_server_config: Optional[Dict[str, Any]] = None
     ):
         general_mcp_server_config = self._load_mcp_servers_config("mcpservers/xga_server.json")
-        tool_box_mcp_server_config = general_mcp_server_config.get("mcpServers", {})
+        tool_box_mcp_server_config = general_mcp_server_config.get('mcpServers', {})

         if custom_mcp_server_config:
             tool_box_mcp_server_config.update(custom_mcp_server_config)
         elif custom_mcp_server_file:
             custom_mcp_server_config = self._load_mcp_servers_config(custom_mcp_server_file)
-            custom_mcp_server_config = custom_mcp_server_config.get("mcpServers", {})
+            custom_mcp_server_config = custom_mcp_server_config.get('mcpServers', {})
             tool_box_mcp_server_config.update(custom_mcp_server_config)

         self._mcp_client = MultiServerMCPClient(tool_box_mcp_server_config)
+
         self.mcp_server_names: List[str] = [server_name for server_name in tool_box_mcp_server_config]
         self.mcp_tool_schemas: Dict[str, List[XGAToolSchema]] = {}
         self.task_tool_schemas: Dict[str, Dict[str,XGAToolSchema]] = {}
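
For orientation, a minimal usage sketch of the constructor path touched above. Only the custom_mcp_server_config parameter and the merge behavior come from the hunk; the server entry itself is hypothetical.

    # Entries passed via custom_mcp_server_config are merged over the servers
    # loaded from mcpservers/xga_server.json. "my_local_tools" and its fields
    # are hypothetical; note that a dict passed directly (rather than loaded
    # from a file) does not go through the transport-defaulting shown in the
    # next hunk, so "transport" is spelled out here.
    tool_box = XGAMcpToolBox(
        custom_mcp_server_config={
            "my_local_tools": {
                "command": "python",
                "args": ["my_tools_server.py"],
                "transport": "stdio",
            }
        }
    )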
@@ -178,18 +179,18 @@ class XGAMcpToolBox(XGAToolBox):
            for server_name, server_info in server_config["mcpServers"].items():
                if "transport" not in server_info:
                    if "url" in server_info:
-                        server_info["transport"] = "streamable_http" if "mcp" in server_info["url"] else "sse"
+                        server_info['transport'] = "streamable_http" if "mcp" in server_info['url'] else "sse"
                    else:
-                        server_info["transport"] = "stdio"
+                        server_info['transport'] = "stdio"

            return server_config
        else:
            logging.warning(f"McpToolBox load_mcp_servers_config: MCP servers config file not found at: {mcp_config_path}")
-            return {"mcpServers": {}}
+            return {'mcpServers': {}}

        except Exception as e:
            logging.error(f"McpToolBox load_mcp_servers_config: Failed to load MCP servers config: {e}")
-            return {"mcpServers": {}}
+            return {'mcpServers': {}}


 if __name__ == "__main__":
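
Read together, the defaulting above works out to: a URL containing "mcp" gets streamable_http, any other URL gets sse, and an entry with no URL gets stdio. A sketch with hypothetical entries; only the key names and the substring check come from the hunk.

    # Inputs and the "transport" each entry would be assigned
    # (server names and URLs are illustrative):
    {
        "alpha": {"url": "http://localhost:8000/mcp"},       # -> "streamable_http" ("mcp" in url)
        "beta": {"url": "http://localhost:8001/events"},     # -> "sse" (url without "mcp")
        "gamma": {"command": "uv", "args": ["run", "srv"]},  # -> "stdio" (no "url" key)
    }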
@@ -200,7 +201,7 @@ if __name__ == "__main__":
    setup_logging()

    async def main():
-        ## Before Run Exec: uv run custom_fault_tools
+        ## Before Run Exec: uv run example-fault-tools
        mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
        #mcp_tool_box = XGAMcpToolBox()

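Presumably the demo is driven the usual way for an async entry point; a sketch where the asyncio.run call is an assumption and the helper command is taken from the updated comment.

    import asyncio

    # Per the comment above, start the sample fault tools first:
    #   uv run example-fault-tools
    # then drive the async entry point defined under __main__:
    asyncio.run(main())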
@@ -34,19 +34,20 @@ class XGAPromptBuilder():
        openai_schemas = []
        for tool_schema in tool_schemas:
            openai_schema = {}
-            openai_schema["type"] = "function"
            openai_function = {}
-            openai_schema["function"] = openai_function
+            openai_schema['type'] = "function"
+            openai_schema['function'] = openai_function

-            openai_function["name"] = tool_schema.tool_name
-            openai_function["description"] = tool_schema.description if tool_schema.description else 'No description available'
+            openai_function['name'] = tool_schema.tool_name
+            openai_function['description'] = tool_schema.description if tool_schema.description else 'No description available'

            openai_parameters = {}
+            openai_function['parameters'] = openai_parameters
+
            input_schema = tool_schema.input_schema
-            openai_function["parameters"] = openai_parameters
-            openai_parameters["type"] = input_schema["type"]
-            openai_parameters["properties"] = input_schema.get("properties", {})
-            openai_parameters["required"] = input_schema["required"]
+            openai_parameters['type'] = input_schema['type']
+            openai_parameters['properties'] = input_schema.get('properties', {})
+            openai_parameters['required'] = input_schema['required']

            openai_schemas.append(openai_schema)

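The loop above maps each XGAToolSchema onto the OpenAI function-calling layout. The reordering is purely cosmetic: the nested dicts are attached by reference, so populating openai_parameters before or after assigning it to openai_function['parameters'] produces the same result. A sketch of one converted entry for a hypothetical tool:

    # Shape produced per tool (tool name and parameters are illustrative):
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "No description available",  # fallback for an empty description
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }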
@@ -3,7 +3,7 @@ import logging
 from typing import List, Dict, Any, AsyncGenerator, override,Optional

 from xgae.utils import log_trace
-from xgae.utils.json_helpers import format_for_yield
+

 from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState

@@ -25,7 +25,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
        try:
            if hasattr(llm_response, 'choices') and llm_response.choices:
                if hasattr(llm_response.choices[0], 'finish_reason'):
-                    finish_reason = llm_response.choices[0].finish_reason
+                    finish_reason = llm_response.choices[0].finish_reason  # LLM finish reason: 'stop', 'length'
                    logging.info(f"NonStreamResp: LLM response finish_reason={finish_reason}")

            response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
@@ -35,12 +35,13 @@ class NonStreamTaskResponser(TaskResponseProcessor):

            parsed_xml_data = self._parse_xml_tool_calls(llm_content)
            if self.max_xml_tool_calls > 0 and len(parsed_xml_data) > self.max_xml_tool_calls:
-                logging.warning(f"NonStreamResp: Truncate content, parsed_xml_data length={len(parsed_xml_data)} limit over max_xml_tool_calls={self.max_xml_tool_calls}")
+                logging.warning(f"NonStreamResp: Over XML Tool Limit, finish_reason='xml_tool_limit_reached', "
+                                f"parsed_xml_data_len={len(parsed_xml_data)}")
                parsed_xml_data = parsed_xml_data[:self.max_xml_tool_calls]
                finish_reason = "xml_tool_limit_reached"

            self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
-                                 status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}, "
+                                 status_message=f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_exec_strategy}, "
                                                f"parsed_xml_data_len={len(parsed_xml_data)}, llm_content_len={len(llm_content)}")

            if len(llm_content) == 0:
@@ -52,7 +53,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):

            tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
            if len(tool_calls_to_execute) > 0:
-                tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
+                tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_exec_strategy)

                tool_index = 0
                for i, (returned_tool_call, tool_result) in enumerate(tool_results):
@@ -61,20 +62,19 @@ class NonStreamTaskResponser(TaskResponseProcessor):
                    parsing_details = parsed_xml_item['parsing_details']
                    assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None

-                    tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details)
-                    tool_context.result = tool_result
+                    tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details, tool_result)

                    tool_start_msg = self._add_tool_start_message(tool_context)
-                    yield format_for_yield(tool_start_msg)
+                    yield tool_start_msg

                    tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy, assistant_msg_id, parsing_details)

                    tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
-                    yield format_for_yield(tool_completed_msg)
+                    yield tool_completed_msg

-                    yield format_for_yield(tool_message)
+                    yield tool_message

-                    if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
+                    if tool_context.function_name in ['ask', 'complete']:
                        finish_reason = "completed"
                        break

@@ -86,7 +86,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
            if finish_reason:
                finish_content = {"status_type": "finish", "finish_reason": finish_reason}
                finish_msg = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
-                yield format_for_yield(finish_msg)
+                yield finish_msg
        except Exception as e:
            trace = log_trace(e, f"NonStreamResp: Process response llm_content:\n {llm_content}")
            self.root_span.event(name="non_stream_process_response_error", level="ERROR",
@@ -95,7 +95,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):

            content = {"role": "system", "status_type": "error", "message": f"Process non-streaming response error: {e}"}
            error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
-            yield format_for_yield(error_msg)
+            yield error_msg

            raise # Use bare 'raise' to preserve the original exception with its traceback
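
With format_for_yield removed in this release, the responser now yields its message records as-is. A minimal consumption sketch; the generator method name and the record layout are assumptions, only the "status_type"/"finish_reason" keys come from the hunks above.

    # Hypothetical consumer (process_response and the record layout are assumed):
    async for message in responser.process_response(llm_response):
        content = message.get("content") or {}
        if isinstance(content, dict) and content.get("status_type") == "finish":
            print("finish_reason:", content.get("finish_reason"))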