xgae 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic; review the release details on the package registry page before upgrading.

@@ -6,24 +6,20 @@ class XGAError(Exception):
6
6
  """Custom exception for errors in the XGA system."""
7
7
  pass
8
8
 
9
+ XGAMsgStatusType = Literal["error", "finish", "tool_error", "tool_started", "tool_completed", "tool_failed", "thread_run_start", "thread_run_end", "assistant_response_start", "assistant_response_end"]
10
+ XGAResponseMsgType = Literal["user", "status", "tool", "assistant"]
9
11
 
10
- class XGAContextMsg(TypedDict, total=False):
11
- type: Literal["user", "status", "tool", "assistant", "assistant_response_end"]
12
- content: Union[Dict[str, Any], List[Any], str]
13
- is_llm_message: bool
14
- metadata: Dict[str, Any]
12
+ class XGAResponseMessage(TypedDict, total=False):
15
13
  message_id: str
16
- session_id: str
17
- agent_id: str
18
- task_id: str
19
- task_run_id: str
20
- trace_id: str
21
-
22
- class XGAResponseMsg(TypedDict, total=False):
23
- type: Literal["content", "status"]
14
+ type: XGAResponseMsgType
15
+ is_llm_message: bool
24
16
  content: Union[Dict[str, Any], List[Any], str]
25
- status: Literal["error", "status"]
26
- message: str
17
+ metadata: Dict[str, Any]
18
+
19
+ class XGATaskResult(TypedDict, total=False):
20
+ type: Literal["ask", "answer", "error"]
21
+ content: str
22
+ attachments: Optional[List[str]]
27
23
 
28
24
  @dataclass
29
25
  class XGAToolSchema:
@@ -39,6 +35,7 @@ class XGAToolResult:
39
35
  success: bool
40
36
  output: str
41
37
 
38
+
42
39
  class XGAToolBox(ABC):
43
40
  @abstractmethod
44
41
  async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
@@ -7,8 +7,8 @@ from typing import List, Any, Dict, Optional, Literal, override
7
7
  from langchain_mcp_adapters.client import MultiServerMCPClient
8
8
  from langchain_mcp_adapters.tools import load_mcp_tools
9
9
 
10
- from xgae.engine.xga_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
11
- from xgae.utils.setup_env import langfuse
10
+ from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
11
+ from xgae.utils import langfuse
12
12
 
13
13
  class XGAMcpToolBox(XGAToolBox):
14
14
  GENERAL_MCP_SERVER_NAME = "xga_general"
@@ -38,7 +38,7 @@ class XGAMcpToolBox(XGAToolBox):
38
38
  async def creat_task_tool_box(self, task_id: str, general_tools: List[str], custom_tools: List[str]):
39
39
  task_tool_schemas = {}
40
40
  general_tool_schemas = self.mcp_tool_schemas.get(XGAMcpToolBox.GENERAL_MCP_SERVER_NAME, {})
41
- if len(general_tools) > 0 and general_tools[0] == "*":
41
+ if "*" in general_tools:
42
42
  task_tool_schemas = {tool_schema.tool_name: tool_schema for tool_schema in general_tool_schemas}
43
43
  else:
44
44
  for tool_schema in general_tool_schemas:
@@ -58,8 +58,8 @@ class XGAMcpToolBox(XGAToolBox):
58
58
  if custom_tool_schemas is None:
59
59
  continue
60
60
  if custom_tool_name == "*":
61
- custom_tool_schema_d = {tool_schema.tool_name: tool_schema for tool_schema in custom_tool_schemas}
62
- task_tool_schemas.update(custom_tool_schema_d)
61
+ custom_tool_schema_dict = {tool_schema.tool_name: tool_schema for tool_schema in custom_tool_schemas}
62
+ task_tool_schemas.update(custom_tool_schema_dict)
63
63
  else:
64
64
  for tool_schema in custom_tool_schemas:
65
65
  if custom_tool_name == tool_schema.tool_name:
@@ -112,10 +112,7 @@ class XGAMcpToolBox(XGAToolBox):
112
112
  if mcp_tool:
113
113
  tool_args = args or {}
114
114
  if server_name == self.GENERAL_MCP_SERVER_NAME:
115
- pass
116
- #tool_args["task_id"] = task_id #xga general tool, first param must be task_id
117
- else:
118
- tool_args = args
115
+ tool_args = dict({"task_id": task_id}, **tool_args)
119
116
 
120
117
  try:
121
118
  tool_result = await mcp_tool.arun(tool_args)
@@ -1,9 +1,10 @@
1
1
  import json
2
2
  import datetime
3
+
3
4
  from typing import Optional, List
4
5
 
5
- from xga_base import XGAToolSchema, XGAError
6
- from xgae.utils.utils import read_file, format_file_with_args
6
+ from engine_base import XGAToolSchema, XGAError
7
+ from xgae.utils.misc import read_file, format_file_with_args
7
8
 
8
9
 
9
10
  class XGAPromptBuilder():
@@ -0,0 +1,110 @@
import logging

from typing import List, Dict, Any, AsyncGenerator, override, Optional

from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
from xgae.utils import langfuse
from xgae.utils.json_helpers import format_for_yield


class NonStreamTaskResponser(TaskResponseProcessor):
    """Processor for complete (non-streaming) LLM responses.

    Extracts the assistant content and finish_reason from the response,
    parses XML tool calls out of the content, executes them with the
    configured strategy, and yields response messages (assistant, tool
    start/completed, tool result, final status) as they are produced.
    """

    def __init__(self, response_context: TaskResponserContext):
        super().__init__(response_context)

    @override
    async def process_response(self, llm_response: Any, prompt_messages: List[Dict[str, Any]],
                               continuous_state: Optional[TaskRunContinuousState] = None) -> AsyncGenerator[Dict[str, Any], None]:
        """Process a full LLM response and yield response-message dicts.

        Args:
            llm_response: completed LLM response object; expected to expose
                ``choices[0].finish_reason`` and ``choices[0].message.content``
                (OpenAI-style shape — TODO confirm against the LLM client used).
            prompt_messages: prompt messages that produced this response
                (currently unused here; kept for interface compatibility).
            continuous_state: optional carry-over state for continued runs.

        Yields:
            Dicts for the assistant message, per-tool status/result messages,
            and a final "finish" status message.

        Raises:
            Re-raises any exception encountered while processing, after
            yielding an error status message.
        """
        llm_content = ""
        parsed_xml_data = []
        finish_reason = None

        try:
            # Extract finish_reason, content, tool calls
            if hasattr(llm_response, 'choices') and llm_response.choices:
                if hasattr(llm_response.choices[0], 'finish_reason'):
                    finish_reason = llm_response.choices[0].finish_reason
                    logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")

                    langfuse.create_event(trace_context=self.trace_context, name="non_streaming_finish_reason", level="DEFAULT",
                                          status_message=(f"Non-streaming finish_reason: {finish_reason}"))

                response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
                if response_message:
                    if hasattr(response_message, 'content') and response_message.content:
                        llm_content = response_message.content

                        parsed_xml_data = self._parse_xml_tool_calls(llm_content)
                        if self.max_xml_tool_calls > 0 and len(parsed_xml_data) > self.max_xml_tool_calls:
                            # Too many tool calls: truncate content after the last allowed
                            # XML chunk and drop the excess parsed calls.
                            logging.warning(f"NonStreamTask:Truncate content, parsed_xml_data length {len(parsed_xml_data)} limit over max_xml_tool_calls={self.max_xml_tool_calls}")
                            xml_chunks = self._extract_xml_chunks(llm_content)[:self.max_xml_tool_calls]
                            if xml_chunks:
                                last_chunk = xml_chunks[-1]
                                last_chunk_pos = llm_content.find(last_chunk)
                                if last_chunk_pos >= 0:
                                    llm_content = llm_content[:last_chunk_pos + len(last_chunk)]
                            parsed_xml_data = parsed_xml_data[:self.max_xml_tool_calls]
                            finish_reason = "xml_tool_limit_reached"
                            logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
                else:
                    logging.warning(f"NonStreamTask:LLM response_message is empty")

            message_data = {"role": "assistant", "content": llm_content, "index": -1}  # index=-1, full llm_content
            assistant_msg = self.add_response_message(type="assistant", content=message_data, is_llm_message=True)
            yield assistant_msg

            tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
            if len(tool_calls_to_execute) > 0:
                logging.info(f"NonStreamTask:Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}")
                langfuse.create_event(trace_context=self.trace_context, name="executing_tools_with_strategy", level="DEFAULT", status_message=(
                    f"NonStreamTask Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}"))

                tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)

                tool_index = 0
                for i, (returned_tool_call, tool_result) in enumerate(tool_results):
                    parsed_xml_item = parsed_xml_data[i]
                    tool_call = parsed_xml_item['tool_call']
                    parsing_details = parsed_xml_item['parsing_details']
                    assistant_msg_id = assistant_msg['message_id'] if assistant_msg else None

                    tool_context = self._create_tool_context(tool_call, tool_index, assistant_msg_id, parsing_details)
                    tool_context.result = tool_result

                    tool_start_msg = self._add_tool_start_message(tool_context)
                    yield format_for_yield(tool_start_msg)

                    # NOTE: "_add_tool_messsage" (sic) is the inherited method's spelling.
                    tool_message = self._add_tool_messsage(tool_call, tool_result, self.xml_adding_strategy, assistant_msg_id, parsing_details)

                    tool_completed_msg = self._add_tool_completed_message(tool_context, tool_message['message_id'])
                    yield format_for_yield(tool_completed_msg)

                    yield format_for_yield(tool_message)

                    # A terminating tool (e.g. ask/complete) ends the run early.
                    if tool_completed_msg["metadata"].get("agent_should_terminate") == "true":
                        finish_reason = "completed"
                        break
                    tool_index += 1

            if finish_reason:
                finish_content = {"status_type": "finish", "finish_reason": finish_reason}
                finish_msg_obj = self.add_response_message(type="status", content=finish_content, is_llm_message=False)
                if finish_msg_obj:
                    yield format_for_yield(finish_msg_obj)

        except Exception as e:
            # BUGFIX: log the exception itself (was only logging llm_content).
            logging.error(f"NonStreamTask: Error processing non-streaming response: {str(e)}; llm_content={llm_content}")
            langfuse.create_event(trace_context=self.trace_context, name="error_processing_non_streaming_response", level="ERROR",
                                  status_message=(f"Error processing non-streaming response: {str(e)}"))

            content = {"role": "system", "status_type": "error", "message": str(e)}
            # BUGFIX: keyword was misspelled "ype", which raised a TypeError
            # inside the handler and masked the original error.
            err_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
            if err_msg:
                yield format_for_yield(err_msg)

            # Re-raise the same exception (not a new one) to ensure proper error propagation
            logging.critical(f"NonStreamTask: Re-raising error to stop further processing: {str(e)}")
            langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="CRITICAL",
                                  status_message=(f"Re-raising error to stop further processing: {str(e)}"))
            raise  # Use bare 'raise' to preserve the original exception with its traceback