xgae 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xgae might be problematic.

xgae/engine/mcp_tool_box.py CHANGED
@@ -46,6 +46,12 @@ class XGAMcpToolBox(XGAToolBox):
             task_tool_schemas[tool_schema.tool_name] = tool_schema
         task_tool_schemas.pop("end_task", None)
 
+        if len(custom_tools) == 1 and custom_tools[0] == "*":
+            custom_tools = []
+            for server_name in self.mcp_server_names:
+                if server_name != XGAMcpToolBox.GENERAL_MCP_SERVER_NAME:
+                    custom_tools.append(f"{server_name}.*")
+
         for server_tool_name in custom_tools:
             parts = server_tool_name.split(".")
             if len(parts) != 2:
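
The new block expands a bare `["*"]` custom-tools spec into one `server.*` wildcard per configured MCP server, skipping the built-in general server. A minimal sketch of the same expansion outside the class (the server names and the general-server constant are illustrative assumptions):

```python
# Sketch of the "*" expansion added above; names are illustrative.
GENERAL_MCP_SERVER_NAME = "general"  # assumed value of the class constant

def expand_custom_tools(custom_tools: list[str], mcp_server_names: list[str]) -> list[str]:
    # A lone "*" means "every tool on every non-general server".
    if len(custom_tools) == 1 and custom_tools[0] == "*":
        return [f"{name}.*" for name in mcp_server_names
                if name != GENERAL_MCP_SERVER_NAME]
    return custom_tools

assert expand_custom_tools(["*"], ["general", "bomc_fault"]) == ["bomc_fault.*"]
assert expand_custom_tools(["a.b"], ["general"]) == ["a.b"]
```
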
xgae/engine/prompt_builder.py CHANGED
@@ -3,7 +3,7 @@ import datetime
 
 from typing import Optional, List
 
-from engine_base import XGAToolSchema, XGAError
+from xgae.engine.engine_base import XGAToolSchema, XGAError
 from xgae.utils.misc import read_file, format_file_with_args
 
 
xgae/engine/responser/non_stream_responser.py CHANGED
@@ -24,8 +24,8 @@ class NonStreamTaskResponser(TaskResponseProcessor):
         finish_reason = llm_response.choices[0].finish_reason
         logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
 
-        langfuse.create_event(trace_context=self.trace_context, name="non_streaming_finish_reason", level="DEFAULT",
-                              status_message=(f"Non-streaming finish_reason: {finish_reason}"))
+        langfuse.create_event(trace_context=self.trace_context, name="non_stream_processor_start", level="DEFAULT",
+                              status_message=(f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}"))
 
         response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
         if response_message:
@@ -54,8 +54,6 @@ class NonStreamTaskResponser(TaskResponseProcessor):
             tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
             if len(tool_calls_to_execute) > 0:
                 logging.info(f"NonStreamTask:Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}")
-                langfuse.create_event(trace_context=self.trace_context, name="executing_tools_with_strategy", level="DEFAULT", status_message=(
-                    f"NonStreamTask Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}"))
 
                 tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
 
xgae/engine/responser/responser_base.py CHANGED
@@ -22,6 +22,7 @@ class TaskResponserContext(TypedDict, total=False):
     task_id: str
     task_run_id: str
     trace_id: str
+    root_span_id: str
     model_name: str
     max_xml_tool_calls: int  # LLM generate max_xml_tool limit, 0 is no limit
     add_response_msg_func: Callable
@@ -34,7 +35,7 @@ class TaskRunContinuousState(TypedDict, total=False):
     accumulated_content: str
     auto_continue_count: int
     auto_continue: bool
-    max_auto_run: int
+
 
 @dataclass
 class ToolExecutionContext:
@@ -61,7 +62,7 @@ class TaskResponseProcessor(ABC):
 
         self.trace_context = {
            "trace_id": self.response_context.get("trace_id"),
-           "parent_span_id": None
+           "parent_span_id": self.response_context.get("root_span_id"),
        }
 
        self.add_response_message = response_context.get("add_response_msg_func")
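
With `root_span_id` threaded through `TaskResponserContext`, each processor now parents its spans under the engine's root span instead of creating trace-level orphans. A hedged sketch of the resulting hierarchy (dict shapes mirror the diff; the ids are placeholders):

```python
# How a processor's trace_context is derived from the engine-provided
# response_context in this release (shapes taken from the hunks above).
response_context = {
    "trace_id": "trace-123",      # created by the engine per run
    "root_span_id": "span-root",  # id of the engine's root span
}

trace_context = {
    "trace_id": response_context.get("trace_id"),
    # Previously hard-coded to None; tool spans now nest under the run's root span.
    "parent_span_id": response_context.get("root_span_id"),
}

# Resulting Langfuse tree, conceptually:
# run_task_with_final_answer            <- root span
# └── execute_tool.<function_name>      <- opened per tool in _execute_tool
print(trace_context)
```
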
@@ -257,38 +258,36 @@ class TaskResponseProcessor(ABC):
 
     async def _execute_tool(self, tool_call: Dict[str, Any]) -> XGAToolResult:
         """Execute a single tool call and return the result."""
-        span = langfuse.start_span(trace_context=self.trace_context, name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
-        self.trace_context["parent_span_id"] = span.id
-        try:
-            function_name = tool_call["function_name"]
-            arguments = tool_call["arguments"]
-
-            logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
-            langfuse.create_event(trace_context=self.trace_context, name="executing_tool", level="DEFAULT",
-                                  status_message=(f"Executing tool: {function_name} with arguments: {arguments}"))
-
-            if isinstance(arguments, str):
-                try:
-                    arguments = safe_json_parse(arguments)
-                except json.JSONDecodeError:
-                    arguments = {"text": arguments}  # @todo modify
-
-            result = None
-            available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
-            if function_name in available_tool_names:
-                result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
-            else:
-                logging.error(f"Tool function '{function_name}' not found in registry")
-                result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
-            logging.info(f"Tool execution complete: {function_name} -> {result}")
-            langfuse.update_current_span(status_message="tool_executed", output=result)
-
-            return result
-        except Exception as e:
-            logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
+        with langfuse.start_as_current_span(trace_context=self.trace_context, name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"]
+                                            ) as exec_tool_span:
+            try:
+                function_name = tool_call["function_name"]
+                arguments = tool_call["arguments"]
+
+                logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
+
+                if isinstance(arguments, str):
+                    try:
+                        arguments = safe_json_parse(arguments)
+                    except json.JSONDecodeError:
+                        arguments = {"text": arguments}  # @todo modify
+
+                result = None
+                available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
+                if function_name in available_tool_names:
+                    result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
+                else:
+                    logging.error(f"Tool function '{function_name}' not found in registry")
+                    result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
+                logging.info(f"Tool execution complete: {function_name} -> {result}")
+                exec_tool_span.update(status_message="tool_executed", output=result)
+
+                return result
+            except Exception as e:
+                logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
 
-        langfuse.update_current_span(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
-        return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
+                exec_tool_span.update(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
+                return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
 
     async def _execute_tools(
         self,
@@ -296,8 +295,6 @@ class TaskResponseProcessor(ABC):
         execution_strategy: ToolExecutionStrategy = "sequential"
     ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
         logging.info(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}")
-        langfuse.create_event(trace_context=self.trace_context, name="executing_tools_with_strategy", level="DEFAULT",
-                              status_message=(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}"))
 
         if execution_strategy == "sequential":
             return await self._execute_tools_sequentially(tool_calls)
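
The `_execute_tool` rewrite above swaps a manually managed span (a `start_span` call plus mutation of the shared `trace_context`) for Langfuse's context-manager form, which restores the parent context and closes the span even when the tool raises. A minimal sketch of the pattern, assuming an initialized Langfuse v3 Python client:

```python
# Hedged sketch of the context-manager span pattern used above.
from langfuse import Langfuse

langfuse = Langfuse()  # assumed: keys read from the environment

def traced_call(arg: str) -> str:
    with langfuse.start_as_current_span(name="execute_tool.demo", input=arg) as span:
        try:
            result = arg.upper()            # stand-in for the real tool call
            span.update(status_message="tool_executed", output=result)
            return result
        except Exception as e:              # the span still ends cleanly on error
            span.update(status_message="tool_execution_error",
                        output=str(e), level="ERROR")
            raise
```
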
@@ -340,9 +337,8 @@ class TaskResponseProcessor(ABC):
                 # Check if this is a terminating tool (ask or complete)
                 if tool_name in ['ask', 'complete']:
                     logging.info(f"Terminating tool '{tool_name}' executed. Stopping further tool execution.")
-                    langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_executed",
-                                          level="DEFAULT", status_message=(
-                            f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
+                    # langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_executed",
+                    #                       level="DEFAULT", status_message=(f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
                     break  # Stop executing remaining tools
 
             except Exception as e:
@@ -353,9 +349,8 @@ class TaskResponseProcessor(ABC):
                 results.append((tool_call, error_result))
 
         logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
-        langfuse.create_event(trace_context=self.trace_context, name="sequential_execution_completed", level="DEFAULT",
-                              status_message=(
-                                  f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
+        # langfuse.create_event(trace_context=self.trace_context, name="sequential_execution_completed", level="DEFAULT",
+        #                       status_message=(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
         return results
 
 
@@ -366,8 +361,8 @@ class TaskResponseProcessor(ABC):
         try:
             tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
             logging.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
-            langfuse.create_event(trace_context=self.trace_context, name="executing_tools_in_parallel", level="DEFAULT",
-                                  status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
+            # langfuse.create_event(trace_context=self.trace_context, name="executing_tools_in_parallel", level="DEFAULT",
+            #                       status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
 
             # Create tasks for all tool calls
             tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
@@ -389,8 +384,8 @@ class TaskResponseProcessor(ABC):
                 processed_results.append((tool_call, result))
 
             logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
-            langfuse.create_event(trace_context=self.trace_context, name="parallel_execution_completed", level="DEFAULT",
-                                  status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
+            # langfuse.create_event(trace_context=self.trace_context, name="parallel_execution_completed", level="DEFAULT",
+            #                       status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
             return processed_results
 
         except Exception as e:
@@ -417,16 +412,11 @@ class TaskResponseProcessor(ABC):
         if assistant_message_id:
             metadata["assistant_message_id"] = assistant_message_id
             logging.info(f"Linking tool result to assistant message: {assistant_message_id}")
-            langfuse.create_event(trace_context=self.trace_context, name="linking_tool_result_to_assistant_message", level="DEFAULT",
-                                  status_message=(f"Linking tool result to assistant message: {assistant_message_id}"))
 
         # --- Add parsing details to metadata if available ---
         if parsing_details:
             metadata["parsing_details"] = parsing_details
             logging.info("Adding parsing_details to tool result metadata")
-            langfuse.create_event(trace_context=self.trace_context, name="adding_parsing_details_to_tool_result_metadata", level="DEFAULT",
-                                  status_message=(f"Adding parsing_details to tool result metadata"),
-                                  metadata={"parsing_details": parsing_details})
 
         # For XML and other non-native tools, use the new structured format
         # Determine message role based on strategy
@@ -599,8 +589,8 @@ class TaskResponseProcessor(ABC):
         if context.function_name in ['ask', 'complete']:
             metadata["agent_should_terminate"] = "true"
             logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
-            langfuse.create_event(trace_context=self.trace_context, name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
-                f"Marking tool status for '{context.function_name}' with termination signal."))
+            # langfuse.create_event(trace_context=self.trace_context, name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
+            #     f"Marking tool status for '{context.function_name}' with termination signal."))
         # <<< END ADDED >>>
 
         return self.add_response_message(
xgae/engine/task_engine.py CHANGED
@@ -1,6 +1,7 @@
 
 import logging
 import json
+import os
 
 from typing import List, Any, Dict, Optional, AsyncGenerator, Union, Literal
 from uuid import uuid4
@@ -12,16 +13,19 @@ from xgae.utils import langfuse, handle_error
 from xgae.utils.llm_client import LLMClient, LLMConfig
 
 from xgae.utils.json_helpers import format_for_yield
-from prompt_builder import XGAPromptBuilder
-from mcp_tool_box import XGAMcpToolBox
+from xgae.engine.prompt_builder import XGAPromptBuilder
+from xgae.engine.mcp_tool_box import XGAMcpToolBox
 
 class XGATaskEngine:
     def __init__(self,
                  session_id: Optional[str] = None,
                  task_id: Optional[str] = None,
                  agent_id: Optional[str] = None,
-                 trace_id: Optional[str] = None,
+                 general_tools: Optional[List[str]] = None,
+                 custom_tools: Optional[List[str]] = None,
                  system_prompt: Optional[str] = None,
+                 max_auto_run: Optional[int] = None,
+                 tool_exec_parallel: Optional[bool] = None,
                  llm_config: Optional[LLMConfig] = None,
                  prompt_builder: Optional[XGAPromptBuilder] = None,
                  tool_box: Optional[XGAToolBox] = None):
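
The async `create()` factory is removed in the next hunk; tool selection, the auto-run limit, and the execution strategy now ride on the constructor, with per-task setup deferred to `_init_task()` at run time. A hedged construction sketch using only parameters visible in this diff:

```python
# Sketch: configuring the 0.1.7 engine via the constructor.
# Keyword names come from the __init__ signature above.
from xgae.engine.task_engine import XGATaskEngine
from xgae.utils.llm_client import LLMConfig

engine = XGATaskEngine(
    general_tools=["ask", "complete"],  # "complete"/"ask" are appended if missing
    custom_tools=["*"],                 # expands to every non-general MCP server
    max_auto_run=8,                     # falls back to MAX_AUTO_RUN env var, then 15
    tool_exec_parallel=True,            # "parallel" strategy; None also means parallel
    llm_config=LLMConfig(stream=False),
)
```
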
@@ -34,125 +38,109 @@ class XGATaskEngine:
         self.is_stream = self.llm_client.is_stream
 
         self.prompt_builder = prompt_builder or XGAPromptBuilder(system_prompt)
-        self.tool_box = tool_box or XGAMcpToolBox()
+        self.tool_box: XGAToolBox = tool_box or XGAMcpToolBox()
 
+        self.general_tools: List[str] = general_tools
+        self.custom_tools: List[str] = custom_tools
         self.task_response_msgs: List[XGAResponseMessage] = []
-        self.task_no = -1
-        self.task_run_id = f"{self.task_id}[{self.task_no}]"
-        self.trace_id: str = trace_id or langfuse.create_trace_id()
-
-    async def _post_init_(self, general_tools: List[str], custom_tools: List[str]) -> None:
-        await self.tool_box.load_mcp_tools_schema()
-        await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
-        general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
-        custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom_tool")
 
-        self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
+        max_auto_run = max_auto_run if max_auto_run else int(os.getenv("MAX_AUTO_RUN", 15))
+        self.max_auto_run: int = 1 if max_auto_run <= 1 else max_auto_run
+        self.tool_exec_parallel = True if tool_exec_parallel is None else tool_exec_parallel
 
-    @classmethod
-    async def create(cls,
-                     session_id: Optional[str] = None,
-                     task_id: Optional[str] = None,
-                     agent_id: Optional[str] = None,
-                     trace_id: Optional[str] = None,
-                     system_prompt: Optional[str] = None,
-                     general_tools: Optional[List[str]] = None,
-                     custom_tools: Optional[List[str]] = None,
-                     llm_config: Optional[LLMConfig] = None,
-                     prompt_builder: Optional[XGAPromptBuilder] = None,
-                     tool_box: Optional[XGAToolBox] = None) -> 'XGATaskEngine':
-        engine: XGATaskEngine = cls(session_id=session_id,
-                                    task_id=task_id,
-                                    agent_id=agent_id,
-                                    trace_id=trace_id,
-                                    system_prompt=system_prompt,
-                                    llm_config=llm_config,
-                                    prompt_builder=prompt_builder,
-                                    tool_box=tool_box)
+        self.task_no = -1
+        self.task_run_id: str = None
 
-        general_tools = general_tools or ["complete", "ask"]
-        if "*" not in general_tools:
-            if "complete" not in general_tools:
-                general_tools.append("complete")
-            elif "ask" not in general_tools:
-                general_tools.append("ask")
+        self.task_prompt: str = None
+        self.trace_id: str = None
+        self.root_span_id: str = None
 
-        custom_tools = custom_tools or []
-        await engine._post_init_(general_tools, custom_tools)
+    async def run_task_with_final_answer(self,
+                                         task_message: Dict[str, Any],
+                                         trace_id: Optional[str] = None) -> XGATaskResult:
+        self.trace_id = trace_id or langfuse.create_trace_id()
+        with langfuse.start_as_current_span(trace_context={"trace_id": self.trace_id},
+                                            name="run_task_with_final_answer",
+                                            input=task_message,
+                                            metadata={"task_id": self.task_id},
+                                            ) as root_span:
+            self.root_span_id = root_span.id
+
+            chunks = []
+            async for chunk in self.run_task(task_message=task_message, trace_id=trace_id):
+                chunks.append(chunk)
+
+            if len(chunks) > 0:
+                final_result = self._parse_final_result(chunks)
+            else:
+                final_result = XGATaskResult(type="error", content="LLM Answer is Empty")
 
-        logging.info("*" * 30 + f" XGATaskEngine Task'{engine.task_id}' Initialized " + "*" * 30)
-        logging.info(f"model_name={engine.model_name}, is_stream={engine.is_stream}, trace_id={engine.trace_id}")
-        logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
+            root_span.update(output=final_result)
+            return final_result
 
-        return engine
-
-    async def run_task_with_final_answer(self,
-                                         task_message: Dict[str, Any],
-                                         max_auto_run: int = 25,
-                                         trace_id: Optional[str] = None) -> XGATaskResult:
-        chunks = []
-        async for chunk in self.run_task(task_message=task_message, max_auto_run=max_auto_run, trace_id=trace_id):
-            chunks.append(chunk)
-        if len(chunks) > 0:
-            final_result = self._parse_final_result(chunks)
-        else:
-            final_result = XGATaskResult(type="error", content="LLM Answer is Empty")
-        return final_result
 
     async def run_task(self,
                        task_message: Dict[str, Any],
-                       max_auto_run: int = 25,
                        trace_id: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
         try:
-            self.trace_id = trace_id or self.trace_id or langfuse.create_trace_id()
+            await self._init_task()
+            if self.root_span_id is None:
+                self.trace_id = trace_id or langfuse.create_trace_id()
+                with langfuse.start_as_current_span(trace_context={"trace_id": self.trace_id},
+                                                    name="run_task",
+                                                    input=task_message
+                                                    ) as root_span:
+                    self.root_span_id = root_span.id
 
-            self.task_no += 1
-            self.task_run_id = f"{self.task_id}[{self.task_no}]"
 
             self.add_response_message(type="user", content=task_message, is_llm_message=True)
 
             continuous_state: TaskRunContinuousState = {
                 "accumulated_content": "",
                 "auto_continue_count": 0,
-                "auto_continue": False if max_auto_run <= 1 else True,
-                "max_auto_run": max_auto_run
+                "auto_continue": False if self.max_auto_run <= 1 else True
             }
             async for chunk in self._run_task_auto(continuous_state):
                 yield chunk
         finally:
             await self.tool_box.destroy_task_tool_box(self.task_id)
+            self.root_span_id = None
 
-    async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
-        llm_messages = [{"role": "system", "content": self.task_prompt}]
-        cxt_llm_contents = self.get_history_llm_messages()
-        llm_messages.extend(cxt_llm_contents)
 
-        partial_content = continuous_state.get('accumulated_content', '')
-        if partial_content:
-            temp_assistant_message = {
-                "role": "assistant",
-                "content": partial_content
-            }
-            llm_messages.append(temp_assistant_message)
+    async def _init_task(self) -> None:
+        self.task_no = self.task_no + 1
+        self.task_run_id = f"{self.task_id}[{self.task_no}]"
 
-        llm_response = await self.llm_client.create_completion(llm_messages)
-        response_processor = self._create_response_processer()
+        general_tools = self.general_tools or ["complete", "ask"]
+        if "*" not in general_tools:
+            if "complete" not in general_tools:
+                general_tools.append("complete")
+            elif "ask" not in general_tools:
+                general_tools.append("ask")
 
-        async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
-            self._logging_reponse_chunk(chunk)
-            yield chunk
+        custom_tools = self.custom_tools or []
+        if isinstance(self.tool_box, XGAMcpToolBox):
+            await self.tool_box.load_mcp_tools_schema()
 
-    async def _run_task_auto(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
-        max_auto_run = continuous_state['max_auto_run']
-        max_auto_run = max_auto_run if max_auto_run > 0 else 1
+        await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
+        general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
+        custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom_tool")
+
+        self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
 
+        logging.info("*" * 30 + f" XGATaskEngine Task'{self.task_id}' Initialized " + "*" * 30)
+        logging.info(f"model_name={self.model_name}, is_stream={self.is_stream}, trace_id={self.trace_id}")
+        logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
+
+
+    async def _run_task_auto(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
         def update_continuous_state(_auto_continue_count, _auto_continue):
             continuous_state["auto_continue_count"] = _auto_continue_count
             continuous_state["auto_continue"] = _auto_continue
 
         auto_continue_count = 0
         auto_continue = True
-        while auto_continue and auto_continue_count < max_auto_run:
+        while auto_continue and auto_continue_count < self.max_auto_run:
             auto_continue = False
 
             try:
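
The `max_auto_run` resolution introduced above is worth pinning down: an explicit argument wins, otherwise the `MAX_AUTO_RUN` environment variable, otherwise 15, and anything at or below 1 collapses to a single non-continuing run. A small sketch of the same resolution logic, with assertions illustrating the falsy-zero edge case:

```python
import os

def resolve_max_auto_run(max_auto_run=None):
    # Mirrors __init__ above: argument > MAX_AUTO_RUN env var > default 15.
    value = max_auto_run if max_auto_run else int(os.getenv("MAX_AUTO_RUN", 15))
    return 1 if value <= 1 else value

assert resolve_max_auto_run(8) == 8
assert resolve_max_auto_run(0) == 15   # 0 is falsy, so it falls through to env/default
assert resolve_max_auto_run(-3) == 1   # non-positive values clamp to one run
os.environ["MAX_AUTO_RUN"] = "4"
assert resolve_max_auto_run() == 4
```
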
@@ -180,7 +168,7 @@ class XGATaskEngine:
                     auto_continue = True
                     auto_continue_count += 1
                     update_continuous_state(auto_continue_count, auto_continue)
-                    logging.info(f"run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{max_auto_run})")
+                    logging.info(f"run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{self.max_auto_run})")
             except Exception as parse_error:
                 logging.error(f"run_task_auto: Error in parse chunk: {str(parse_error)}")
                 content = {"role": "system", "status_type": "error", "message": "Parse response chunk Error"}
@@ -194,6 +182,28 @@ class XGATaskEngine:
             error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
             yield format_for_yield(error_msg)
 
+
+    async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
+        llm_messages = [{"role": "system", "content": self.task_prompt}]
+        cxt_llm_contents = self.get_history_llm_messages()
+        llm_messages.extend(cxt_llm_contents)
+
+        partial_content = continuous_state.get('accumulated_content', '')
+        if partial_content:
+            temp_assistant_message = {
+                "role": "assistant",
+                "content": partial_content
+            }
+            llm_messages.append(temp_assistant_message)
+
+        llm_response = await self.llm_client.create_completion(llm_messages, self.trace_id)
+        response_processor = self._create_response_processer()
+
+        async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
+            self._logging_reponse_chunk(chunk)
+            yield chunk
+
+
     def _parse_final_result(self, chunks: List[Dict[str, Any]]) -> XGATaskResult:
         final_result: XGATaskResult = None
         try:
@@ -245,6 +255,7 @@ class XGATaskEngine:
 
         return final_result
 
+
     def add_response_message(self, type: XGAResponseMsgType,
                              content: Union[Dict[str, Any], List[Any], str],
                              is_llm_message: bool,
@@ -289,6 +300,7 @@ class XGATaskEngine:
 
         return response_llm_contents
 
+
     def _create_response_processer(self) -> TaskResponseProcessor:
         response_context = self._create_response_context()
         is_stream = response_context.get("is_stream", False)
@@ -305,11 +317,12 @@ class XGATaskEngine:
             "task_id": self.task_id,
             "task_run_id": self.task_run_id,
             "trace_id": self.trace_id,
+            "root_span_id": self.root_span_id,
             "model_name": self.model_name,
             "max_xml_tool_calls": 0,
             "add_response_msg_func": self.add_response_message,
             "tool_box": self.tool_box,
-            "tool_execution_strategy": "sequential",  # "parallel"
+            "tool_execution_strategy": "parallel" if self.tool_exec_parallel else "sequential",
             "xml_adding_strategy": "user_message",
         }
         return response_context
@@ -338,23 +351,17 @@ if __name__ == "__main__":
 
     async def main():
         tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-        system_prompt = read_file("templates/scp_test_prompt.txt")
-        engine = await XGATaskEngine.create(tool_box=tool_box,
-                                            general_tools=[],
-                                            custom_tools=["bomc_fault.*"],
-                                            llm_config=LLMConfig(stream=False),
-                                            system_prompt=system_prompt)
-
-        final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "定位10.0.1.1故障"}, max_auto_run=8)
+        system_prompt = read_file("templates/example_user_prompt.txt")
+        engine = XGATaskEngine(tool_box=tool_box,
+                               general_tools=[],
+                               custom_tools=["*"],
+                               llm_config=LLMConfig(stream=False),
+                               system_prompt=system_prompt,
+                               max_auto_run=8)
+
+        final_result = await engine.run_task_with_final_answer(task_message={"role": "user",
                                                                              "content": "locate 10.0.0.1 fault and solution"})
         print("FINAL RESULT:", final_result)
 
-        # ==== test streaming response ========
-        # chunks = []
-        # async for chunk in engine.run_task(task_message={"role": "user", "content": "定位10.0.0.1的故障"}, max_auto_run=8):
-        #     print(chunk)
 
-        # ==== test no tool call ========
-        # engine = await XGATaskEngine.create(llm_config=LLMConfig(stream=False))
-        # final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "1+1"}, max_auto_run=2)
-        # print("FINAL RESULT:", final_result)
     asyncio.run(main())
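
The commented-out streaming test was dropped along with the per-call `max_auto_run` argument; under the new API the same experiment would iterate `run_task` directly. A hedged sketch (the prompt text is illustrative):

```python
# Sketch: streaming consumption with the 0.1.7 API; max_auto_run now lives
# on the engine, and run_task takes only the message and an optional trace_id.
import asyncio
from xgae.engine.task_engine import XGATaskEngine
from xgae.utils.llm_client import LLMConfig

async def stream_demo():
    engine = XGATaskEngine(llm_config=LLMConfig(stream=True), max_auto_run=8)
    async for chunk in engine.run_task(task_message={"role": "user",
                                                     "content": "locate 10.0.0.1 fault"}):
        print(chunk)

asyncio.run(stream_demo())
```
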
xgae/tools/without_general_tools_app.py ADDED
@@ -0,0 +1,48 @@
+from typing import Annotated, Optional
+from pydantic import Field
+
+from mcp.server.fastmcp import FastMCP
+
+from xgae.engine.engine_base import XGAToolResult
+
+mcp = FastMCP(name="XGAE Message Tools")
+
+@mcp.tool(
+    description="""A special tool to indicate you have completed all tasks and are about to enter complete state. Use ONLY when: 1) All tasks in todo.md are marked complete [x], 2) The user's original request has been fully addressed, 3) There are no pending actions or follow-ups required, 4) You've delivered all final outputs and results to the user. IMPORTANT: This is the ONLY way to properly terminate execution. Never use this tool unless ALL tasks are complete and verified. Always ensure you've provided all necessary outputs and references before using this tool. Include relevant attachments when the completion relates to specific files or resources."""
+)
+async def complete(task_id: str,
+                   text: Annotated[Optional[str], Field(default=None,
+                       description="Completion summary. Include: 1) Task summary 2) Key deliverables 3) Next steps 4) Impact achieved")],
+                   attachments: Annotated[Optional[str], Field(default=None,
+                       description="Comma-separated list of final outputs. Use when: 1) Completion relates to files 2) User needs to review outputs 3) Deliverables in files")]
+                   ):
+    print(f"<XGAETools-complete>: task_id={task_id}, text={text}, attachments={attachments}")
+    return XGAToolResult(success=True, output=str({"status": "complete"}))
+
+
+@mcp.tool(
+    description="""Ask user a question and wait for response. Use for: 1) Requesting clarification on ambiguous requirements, 2) Seeking confirmation before proceeding with high-impact changes, 3) Gathering additional information needed to complete a task, 4) Offering options and requesting user preference, 5) Validating assumptions when critical to task success, 6) When encountering unclear or ambiguous results during task execution, 7) When tool results don't match expectations, 8) For natural conversation and follow-up questions, 9) When research reveals multiple entities with the same name, 10) When user requirements are unclear or could be interpreted differently. IMPORTANT: Use this tool when user input is essential to proceed. Always provide clear context and options when applicable. Use natural, conversational language that feels like talking with a helpful friend. Include relevant attachments when the question relates to specific files or resources. CRITICAL: When you discover ambiguity (like multiple people with the same name), immediately stop and ask for clarification rather than making assumptions."""
+)
+async def ask(task_id: str,
+              text: Annotated[str, Field(
+                  description="Question text to present to user. Include: 1) Clear question/request 2) Context why input is needed 3) Available options 4) Impact of choices 5) Relevant constraints")],
+              attachments: Annotated[Optional[str], Field(default=None,
+                  description="Comma-separated list of files/URLs to attach. Use when: 1) Question relates to files/configs 2) User needs to review content 3) Options documented in files 4) Supporting evidence needed")]
+              ):
+    print(f"<XGAETools-ask>: task_id={task_id}, text={text}, attachments={attachments}")
+    return XGAToolResult(success=True, output=str({"status": "Awaiting user response..."}))
+
+@mcp.tool(
+    description="end task, destroy sandbox"
+)
+async def end_task(task_id: str):
+    print(f"<XGAETools-end_task> task_id: {task_id}")
+
+
+def main():
+    # print("="*20 + " XGAE Message Tools Sever Started in Stdio mode " + "="*20)
+    mcp.run(transport="stdio")
+
+if __name__ == "__main__":
+    main()
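
This new module backs the `xgae-tools` console script registered in `entry_points.txt` below; running it serves `complete`, `ask`, and `end_task` over stdio. A hedged client-side sketch using the MCP Python SDK (the SDK import paths and call shapes are assumptions about the `mcp` package, not part of this wheel):

```python
# Sketch: calling the stdio tool server with the MCP Python SDK.
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main():
    # xgae-tools is the console script this wheel installs.
    params = StdioServerParameters(command="xgae-tools")
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool("ask", {"task_id": "t-1",
                                                     "text": "Proceed with the fix?"})
            print(result)

asyncio.run(main())
```
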
xgae/utils/llm_client.py CHANGED
@@ -47,6 +47,7 @@ class LLMClient:
        reasoning_effort: Optional level of reasoning effort, default is 'low'
        top_p: Optional Top-p sampling parameter, default is None
        """
+
        llm_config = llm_config or LLMConfig()
        litellm.modify_params = True
        litellm.drop_params = True
@@ -205,9 +206,10 @@ class LLMClient:
             logging.debug(f"LLMClient: Waiting {delay} seconds before retry llm completion...")
             await asyncio.sleep(delay)
 
-
-    async def create_completion(self, messages: List[Dict[str, Any]]) -> Union[ModelResponse, CustomStreamWrapper]:
+    async def create_completion(self, messages: List[Dict[str, Any]], trace_id: Optional[str] = None) -> Union[ModelResponse, CustomStreamWrapper]:
         complete_params = self._prepare_complete_params(messages)
+        if trace_id:
+            complete_params["litellm_trace_id"] = trace_id
 
         last_error = None
         for attempt in range(self.max_retries):
@@ -226,10 +228,13 @@ class LLMClient:
             raise LLMError(f"LLM completion failed after {self.max_retries} attempts !")
 
 if __name__ == "__main__":
+    from xgae.utils import langfuse
+
     async def llm_completion():
         llm_client = LLMClient(LLMConfig(stream=False))
         messages = [{"role": "user", "content": "今天是2025年8月15日,北京本周每天温度"}]
-        response = await llm_client.create_completion(messages)
+        trace_id = langfuse.create_trace_id()
+        response = await llm_client.create_completion(messages, trace_id)
         if llm_client.is_stream:
             async for chunk in response:
                 choices = chunk.get("choices", [{}])
xgae/utils/setup_env.py CHANGED
@@ -64,6 +64,7 @@ def setup_langfuse() -> Langfuse:
                              public_key=env_public_key,
                              secret_key=env_secret_key,
                              host=env_host)
+
         logging.info("Langfuse initialized Successfully by Key !")
     else:
         _langfuse = Langfuse(tracing_enabled=False)
@@ -74,5 +75,6 @@ def setup_langfuse() -> Langfuse:
 
 if __name__ == "__main__":
     from xgae.utils import langfuse
+
     trace_id = langfuse.create_trace_id()
     logging.warning(f"trace_id={trace_id}")
{xgae-0.1.6.dist-info → xgae-0.1.7.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: xgae
-Version: 0.1.6
+Version: 0.1.7
 Summary: Extreme General Agent Engine
 Requires-Python: >=3.13
 Requires-Dist: colorlog>=6.9.0
xgae-0.1.7.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
+xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+xgae/engine/engine_base.py,sha256=ySERuLy1YWsf-3s0NFKcyTnXQ4g69wR-cQhtnG0OFmU,1747
+xgae/engine/mcp_tool_box.py,sha256=iBfCLWXvbkKWd1JnSgBLA1T1KdSdpEZCrROkC_cXp7g,10132
+xgae/engine/prompt_builder.py,sha256=8_rNRJksn2QLV_K98S0x0qNeHcmxhU0kB_53IZJTGOU,4366
+xgae/engine/task_engine.py,sha256=mjS7oDlJH-1wBohzz1FfFng2nE_K0xL-QaNZDuQss3A,17986
+xgae/engine/responser/non_stream_responser.py,sha256=kOP9kDEhHtrDKMyVnBPWEx0yO-xTvVacfXGJJqFtONU,6395
+xgae/engine/responser/responser_base.py,sha256=rn3-LkS_alZUwdIxPfDGC_zAH_CncSQ2I-euYA6a45w,30524
+xgae/engine/responser/stream_responser.py,sha256=5KzCHApiPplZ-zN_sbbEbSvj2rtvKWBshJKe_-x7RDI,52927
+xgae/tools/without_general_tools_app.py,sha256=QknIF4OW9xvOad8gx-F_sCBwQYXqMalnNFvYvZXkQ_I,3789
+xgae/utils/__init__.py,sha256=jChvD-p_p5gsrCZUVYPUGJs4CS9gIdNFcSOpkRpcM4Y,317
+xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
+xgae/utils/llm_client.py,sha256=RvID4bL9yZon096uvuoFZPlqAPiHhET9-9qYp6sUERc,12605
+xgae/utils/misc.py,sha256=EK94YesZp8AmRUqWfN-CjTxyEHPWdIIWpFNO17dzm9g,915
+xgae/utils/setup_env.py,sha256=EVk0KG92Sk6ejBxXZbDDr_dc3KM8GFMofMA4HvXqSfM,2409
+xgae/utils/xml_tool_parser.py,sha256=EJ6BjpD4CSdmS_LqViUJ6P8H9GY2R1e4Dh8rLCR6nSE,7474
+xgae-0.1.7.dist-info/METADATA,sha256=AbGuJUOv4574WF9a2nQNaTxuOKEN2K_U7RaACci2PME,309
+xgae-0.1.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+xgae-0.1.7.dist-info/entry_points.txt,sha256=rhQ9Vksnu8nA78lPTjiJxOCZ5k6sH6s5YNMR68y7C-A,73
+xgae-0.1.7.dist-info/RECORD,,
xgae-0.1.7.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+[console_scripts]
+xgae-tools = xgae.tools.without_general_tools_app:main
xgae-0.1.6.dist-info/RECORD DELETED
@@ -1,17 +0,0 @@
-xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-xgae/engine/engine_base.py,sha256=ySERuLy1YWsf-3s0NFKcyTnXQ4g69wR-cQhtnG0OFmU,1747
-xgae/engine/mcp_tool_box.py,sha256=6mdvu9-aquyLJEwebTtpa_bfGmgT1jPszKE90NIpR5c,9852
-xgae/engine/prompt_builder.py,sha256=ygFAIc4p3opIMyl6g1JeBuSiMjNVxwRloKeF2eX8R5I,4354
-xgae/engine/task_engine.py,sha256=xxAWtPfKgSpf6L7wOc243U-7YP8AC2WYoCI-FUdDpOc,18132
-xgae/engine/responser/non_stream_responser.py,sha256=QEFE4JGYVaIbFeMUMJa1Mt1uBblU_hAOywAhyp9V1k4,6634
-xgae/engine/responser/responser_base.py,sha256=aHKJ880B1ezfBWzyHoOSNVDb-CJY4ujH2MGm61aJLy8,31468
-xgae/engine/responser/stream_responser.py,sha256=5KzCHApiPplZ-zN_sbbEbSvj2rtvKWBshJKe_-x7RDI,52927
-xgae/utils/__init__.py,sha256=jChvD-p_p5gsrCZUVYPUGJs4CS9gIdNFcSOpkRpcM4Y,317
-xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
-xgae/utils/llm_client.py,sha256=mgzn8heUyRm92HTLEYGdfsGEpFtD-xLFr39P98_JP0s,12402
-xgae/utils/misc.py,sha256=EK94YesZp8AmRUqWfN-CjTxyEHPWdIIWpFNO17dzm9g,915
-xgae/utils/setup_env.py,sha256=Nc0HCQOnK-EGNLTWCQ9-iYysNRdIvwGhcHdqpNeV910,2407
-xgae/utils/xml_tool_parser.py,sha256=EJ6BjpD4CSdmS_LqViUJ6P8H9GY2R1e4Dh8rLCR6nSE,7474
-xgae-0.1.6.dist-info/METADATA,sha256=Q5OiPe5W3H7ym2TDPaM1x3k6jSTIol3QDyWI0dsQetw,309
-xgae-0.1.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-xgae-0.1.6.dist-info/RECORD,,
- xgae-0.1.6.dist-info/RECORD,,
File without changes