xgae 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic. See the release details below.

@@ -6,18 +6,15 @@ class XGAError(Exception):
6
6
  """Custom exception for errors in the XGA system."""
7
7
  pass
8
8
 
9
+ XGAMsgStatusType = Literal["error", "finish", "tool_error", "tool_started", "tool_completed", "tool_failed", "thread_run_start", "thread_run_end", "assistant_response_start", "assistant_response_end"]
10
+ XGAResponseMsgType = Literal["user", "status", "tool", "assistant"]
9
11
 
10
- class XGAResponseMsg(TypedDict, total=False):
11
- type: Literal["user", "status", "tool", "assistant", "assistant_response_end"]
12
- content: Union[Dict[str, Any], List[Any], str]
12
+ class XGAResponseMessage(TypedDict, total=False):
13
+ message_id: str
14
+ type: XGAResponseMsgType
13
15
  is_llm_message: bool
16
+ content: Union[Dict[str, Any], List[Any], str]
14
17
  metadata: Dict[str, Any]
15
- message_id: str
16
- task_id: str
17
- task_run_id: str
18
- trace_id: str
19
- session_id: Optional[str]
20
- agent_id: Optional[str]
21
18
 
22
19
  class XGATaskResult(TypedDict, total=False):
23
20
  type: Literal["ask", "answer", "error"]
@@ -7,8 +7,8 @@ from typing import List, Any, Dict, Optional, Literal, override
7
7
  from langchain_mcp_adapters.client import MultiServerMCPClient
8
8
  from langchain_mcp_adapters.tools import load_mcp_tools
9
9
 
10
- from xgae.engine.xga_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
11
- from xgae.utils.setup_env import langfuse
10
+ from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
11
+ from xgae.utils import langfuse
12
12
 
13
13
  class XGAMcpToolBox(XGAToolBox):
14
14
  GENERAL_MCP_SERVER_NAME = "xga_general"
@@ -38,7 +38,7 @@ class XGAMcpToolBox(XGAToolBox):
38
38
  async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
39
39
  task_tool_schemas = {}
40
40
  general_tool_schemas = self.mcp_tool_schemas.get(XGAMcpToolBox.GENERAL_MCP_SERVER_NAME, {})
41
- if len(general_tools) > 0 and general_tools[0] == "*":
41
+ if "*" in general_tools:
42
42
  task_tool_schemas = {tool_schema.tool_name: tool_schema for tool_schema in general_tool_schemas}
43
43
  else:
44
44
  for tool_schema in general_tool_schemas:
@@ -46,6 +46,12 @@ class XGAMcpToolBox(XGAToolBox):
46
46
  task_tool_schemas[tool_schema.tool_name] = tool_schema
47
47
  task_tool_schemas.pop("end_task", None)
48
48
 
49
+ if len(custom_tools) == 1 and custom_tools[0] == "*":
50
+ custom_tools = []
51
+ for server_name in self.mcp_server_names:
52
+ if server_name != XGAMcpToolBox.GENERAL_MCP_SERVER_NAME:
53
+ custom_tools.append(f"{server_name}.*")
54
+
49
55
  for server_tool_name in custom_tools:
50
56
  parts = server_tool_name.split(".")
51
57
  if len(parts) != 2:
@@ -58,8 +64,8 @@ class XGAMcpToolBox(XGAToolBox):
58
64
  if custom_tool_schemas is None:
59
65
  continue
60
66
  if custom_tool_name == "*":
61
- custom_tool_schema_d = {tool_schema.tool_name: tool_schema for tool_schema in custom_tool_schemas}
62
- task_tool_schemas.update(custom_tool_schema_d)
67
+ custom_tool_schema_dict = {tool_schema.tool_name: tool_schema for tool_schema in custom_tool_schemas}
68
+ task_tool_schemas.update(custom_tool_schema_dict)
63
69
  else:
64
70
  for tool_schema in custom_tool_schemas:
65
71
  if custom_tool_name == tool_schema.tool_name:
@@ -1,9 +1,10 @@
1
1
  import json
2
2
  import datetime
3
+
3
4
  from typing import Optional, List
4
5
 
5
- from xga_base import XGAToolSchema, XGAError
6
- from xgae.utils.utils import read_file, format_file_with_args
6
+ from xgae.engine.engine_base import XGAToolSchema, XGAError
7
+ from xgae.utils.misc import read_file, format_file_with_args
7
8
 
8
9
 
9
10
  class XGAPromptBuilder():
@@ -0,0 +1,108 @@
1
+ import logging
2
+
3
+ from typing import List, Dict, Any, AsyncGenerator, override,Optional
4
+
5
+ from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
6
+ from xgae.utils import langfuse
7
+ from xgae.utils.json_helpers import format_for_yield
8
+
9
+ class NonStreamTaskResponser(TaskResponseProcessor):
10
+ def __init__(self, response_context: TaskResponserContext):
11
+ super().__init__(response_context)
12
+
13
+ @override
14
+ async def process_response(self,llm_response: Any,prompt_messages: List[Dict[str, Any]],
15
+ continuous_state: Optional[TaskRunContinuousState] = None) -> AsyncGenerator[Dict[str, Any], None]:
16
+ llm_content = ""
17
+ parsed_xml_data = []
18
+ finish_reason = None
19
+
20
+ try:
21
+ # Extract finish_reason, content, tool calls
22
+ if hasattr(llm_response, 'choices') and llm_response.choices:
23
+ if hasattr(llm_response.choices[0], 'finish_reason'):
24
+ finish_reason = llm_response.choices[0].finish_reason
25
+ logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
26
+
27
+ langfuse.create_event(trace_context=self.trace_context, name="non_stream_processor_start", level="DEFAULT",
28
+ status_message=(f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}"))
29
+
30
+ response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
31
+ if response_message:
32
+ if hasattr(response_message, 'content') and response_message.content:
33
+ llm_content = response_message.content
34
+
35
+ parsed_xml_data = self._parse_xml_tool_calls(llm_content)
36
+ if self.max_xml_tool_calls > 0 and len(parsed_xml_data) > self.max_xml_tool_calls:
37
+ logging.warning(f"NonStreamTask:Truncate content, parsed_xml_data length {len(parsed_xml_data)} limit over max_xml_tool_calls={self.max_xml_tool_calls}")
38
+ xml_chunks = self._extract_xml_chunks(llm_content)[:self.max_xml_tool_calls]
39
+ if xml_chunks:
40
+ last_chunk = xml_chunks[-1]
41
+ last_chunk_pos = llm_content.find(last_chunk)
42
+ if last_chunk_pos >= 0:
43
+ llm_content = llm_content[:last_chunk_pos + len(last_chunk)]
44
+ parsed_xml_data = parsed_xml_data[:self.max_xml_tool_calls]
45
+ finish_reason = "xml_tool_limit_reached"
46
+ logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
47
+ else:
48
+ logging.warning(f"NonStreamTask:LLM response_message is empty")
49
+
50
+ message_data = {"role": "assistant", "content": llm_content, "index": -1} # index=-1, full llm_content
51
+ assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
52
+ yield assistant_msg
53
+
54
+ tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
55
+ if len(tool_calls_to_execute) > 0:
56
+ logging.info(f"NonStreamTask:Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}")
57
+
58
+ tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
59
+
60
+ tool_index = 0
61
+ for i, (returned_tool_call, tool_result) in enumerate(tool_results):
62
+ parsed_xml_item = parsed_xml_data[i]
63
+ tool_call = parsed_xml_item['tool_call']
64
+ parsing_details = parsed_xml_item['parsing_details']
65
+ assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None
66
+
67
+ tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details)
68
+ tool_context.result = tool_result
69
+
70
+ tool_start_msg = self._add_tool_start_message(tool_context)
71
+ yield format_for_yield(tool_start_msg)
72
+
73
+ tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy, assistant_msg_id, parsing_details)
74
+
75
+ tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
76
+ yield format_for_yield(tool_completed_msg)
77
+
78
+ yield format_for_yield(tool_message)
79
+
80
+ if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
81
+ finish_reason = "completed"
82
+ break
83
+ tool_index += 1
84
+
85
+ if finish_reason:
86
+ finish_content = {"status_type": "finish", "finish_reason": finish_reason}
87
+ finish_msg_obj = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
88
+ if finish_msg_obj:
89
+ yield format_for_yield(finish_msg_obj)
90
+
91
+ except Exception as e:
92
+ logging.error(f"NonStreamTask: Error processing non-streaming response: {llm_content}")
93
+ langfuse.create_event(trace_context=self.trace_context, name="error_processing_non_streaming_response", level="ERROR",
94
+ status_message=(f"Error processing non-streaming response: {str(e)}"))
95
+
96
+ content = {"role": "system", "status_type": "error", "message": str(e)}
97
+ err_msg = self.add_response_message(ype="status", content=content,is_llm_message=False)
98
+ if err_msg:
99
+ yield format_for_yield(err_msg)
100
+
101
+ # Re-raise the same exception (not a new one) to ensure proper error propagation
102
+ logging.critical(f"NonStreamTask: Re-raising error to stop further processing: {str(e)}")
103
+ langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="CRITICAL",
104
+ status_message=(f"Re-raising error to stop further processing: {str(e)}"))
105
+ raise # Use bare 'raise' to preserve the original exception with its traceback
106
+
107
+
108
+