xgae 0.1.6__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xgae has been flagged as possibly problematic.

@@ -55,4 +55,4 @@ class XGAToolBox(ABC):
 
     @abstractmethod
     async def call_tool(self, task_id: str, tool_name: str, args: Optional[Dict[str, Any]] = None) -> XGAToolResult:
-        pass
+        pass
@@ -8,7 +8,6 @@ from langchain_mcp_adapters.client import MultiServerMCPClient
 from langchain_mcp_adapters.tools import load_mcp_tools
 
 from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
-from xgae.utils import langfuse
 
 class XGAMcpToolBox(XGAToolBox):
     GENERAL_MCP_SERVER_NAME = "xga_general"
@@ -46,6 +45,12 @@ class XGAMcpToolBox(XGAToolBox):
             task_tool_schemas[tool_schema.tool_name] = tool_schema
         task_tool_schemas.pop("end_task", None)
 
+        if len(custom_tools) == 1 and custom_tools[0] == "*":
+            custom_tools = []
+            for server_name in self.mcp_server_names:
+                if server_name != XGAMcpToolBox.GENERAL_MCP_SERVER_NAME:
+                    custom_tools.append(f"{server_name}.*")
+
         for server_tool_name in custom_tools:
             parts = server_tool_name.split(".")
             if len(parts) != 2:
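For reference, a minimal standalone sketch of the new wildcard expansion (the server names below are hypothetical; `XGAMcpToolBox` reads them from its loaded MCP server config):

```python
# Sketch: custom_tools=["*"] now expands to one "<server>.*" pattern
# per configured non-general MCP server.
mcp_server_names = ["xga_general", "bomc_fault", "web_tools"]  # hypothetical
GENERAL_MCP_SERVER_NAME = "xga_general"

custom_tools = ["*"]
if len(custom_tools) == 1 and custom_tools[0] == "*":
    custom_tools = [f"{name}.*" for name in mcp_server_names
                    if name != GENERAL_MCP_SERVER_NAME]

print(custom_tools)  # ['bomc_fault.*', 'web_tools.*']
```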
@@ -188,8 +193,8 @@ if __name__ == "__main__":
 
     async def main():
         task_id = "task1"
-        #mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-        mcp_tool_box = XGAMcpToolBox()
+        mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+        #mcp_tool_box = XGAMcpToolBox()
         await mcp_tool_box.load_mcp_tools_schema()
         await mcp_tool_box.creat_task_tool_box(task_id=task_id, general_tools=["*"], custom_tools=["bomc_fault.*"])
         tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "general_tool")
@@ -204,9 +209,6 @@ if __name__ == "__main__":
             print(asdict(tool_schema))
         print()
 
-        result = await mcp_tool_box.call_tool(task_id=task_id, tool_name="web_search", args={"task_id": task_id, "query": "check Tianjin weather"})
-        print(f"call web_search result: {result}")
-
         result = await mcp_tool_box.call_tool(task_id=task_id, tool_name="complete", args={"task_id": task_id})
         print(f"call complete result: {result}")
 
@@ -3,7 +3,7 @@ import datetime
 
 from typing import Optional, List
 
-from engine_base import XGAToolSchema, XGAError
+from xgae.engine.engine_base import XGAToolSchema, XGAError
 from xgae.utils.misc import read_file, format_file_with_args
 
 
@@ -3,7 +3,6 @@ import logging
 from typing import List, Dict, Any, AsyncGenerator, override, Optional
 
 from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
-from xgae.utils import langfuse
 from xgae.utils.json_helpers import format_for_yield
 
 class NonStreamTaskResponser(TaskResponseProcessor):
@@ -16,6 +15,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
         llm_content = ""
         parsed_xml_data = []
         finish_reason = None
+        llm_count = continuous_state.get("auto_continue_count")
 
         try:
             # Extract finish_reason, content, tool calls
@@ -24,8 +24,8 @@ class NonStreamTaskResponser(TaskResponseProcessor):
             finish_reason = llm_response.choices[0].finish_reason
             logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
 
-            langfuse.create_event(trace_context=self.trace_context, name="non_streaming_finish_reason", level="DEFAULT",
-                                  status_message=(f"Non-streaming finish_reason: {finish_reason}"))
+            self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
+                                 status_message=(f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}"))
 
             response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
             if response_message:
@@ -54,8 +54,6 @@ class NonStreamTaskResponser(TaskResponseProcessor):
             tool_calls_to_execute = [item['tool_call'] for item in parsed_xml_data]
             if len(tool_calls_to_execute) > 0:
                 logging.info(f"NonStreamTask:Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}")
-                langfuse.create_event(trace_context=self.trace_context, name="executing_tools_with_strategy", level="DEFAULT", status_message=(
-                    f"NonStreamTask Executing {len(tool_calls_to_execute)} tools with strategy: {self.tool_execution_strategy}"))
 
                 tool_results = await self._execute_tools(tool_calls_to_execute, self.tool_execution_strategy)
 
@@ -92,7 +90,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
 
         except Exception as e:
             logging.error(f"NonStreamTask: Error processing non-streaming response: {llm_content}")
-            langfuse.create_event(trace_context=self.trace_context, name="error_processing_non_streaming_response", level="ERROR",
+            self.root_span.event(name="error_processing_non_streaming_response", level="ERROR",
                                  status_message=(f"Error processing non-streaming response: {str(e)}"))
 
             content = {"role": "system", "status_type": "error", "message": str(e)}
@@ -102,7 +100,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
 
             # Re-raise the same exception (not a new one) to ensure proper error propagation
             logging.critical(f"NonStreamTask: Re-raising error to stop further processing: {str(e)}")
-            langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="CRITICAL",
+            self.root_span.event(name="re_raising_error_to_stop_further_processing", level="CRITICAL",
                                  status_message=(f"Re-raising error to stop further processing: {str(e)}"))
             raise  # Use bare 'raise' to preserve the original exception with its traceback
 
@@ -7,8 +7,9 @@ from dataclasses import dataclass
 from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable, TypedDict, AsyncGenerator
 
 from xgae.engine.engine_base import XGAToolResult, XGAToolBox
-from xgae.utils import langfuse
-from xgae.utils.json_helpers import safe_json_parse,format_for_yield
+from xgae.utils.setup_env import langfuse
+
+from xgae.utils.json_helpers import safe_json_parse, format_for_yield
 from xgae.utils.xml_tool_parser import XMLToolParser
 
 # Type alias for XML result adding strategy
@@ -21,7 +22,9 @@ class TaskResponserContext(TypedDict, total=False):
     is_stream: bool
     task_id: str
     task_run_id: str
+    task_no: int
     trace_id: str
+    root_span_id: str
     model_name: str
     max_xml_tool_calls: int  # LLM generate max_xml_tool limit, 0 is no limit
     add_response_msg_func: Callable
@@ -34,7 +37,7 @@ class TaskRunContinuousState(TypedDict, total=False):
     accumulated_content: str
     auto_continue_count: int
     auto_continue: bool
-    max_auto_run: int
+
 
 @dataclass
 class ToolExecutionContext:
@@ -55,15 +58,12 @@ class TaskResponseProcessor(ABC):
 
         self.task_id = response_context.get("task_id")
         self.task_run_id = response_context.get("task_run_id")
+        self.task_no = response_context.get("task_no")
         self.tool_execution_strategy = self.response_context.get("tool_execution_strategy", "parallel")
         self.xml_adding_strategy = self.response_context.get("xml_adding_strategy", "user_message")
         self.max_xml_tool_calls = self.response_context.get("max_xml_tool_calls", 0)
 
-        self.trace_context = {
-            "trace_id": self.response_context.get("trace_id"),
-            "parent_span_id": None
-        }
-
+        self.root_span = langfuse.span(trace_id=response_context.get("trace_id"), id=response_context.get("root_span_id"))
         self.add_response_message = response_context.get("add_response_msg_func")
 
         self.tool_box = response_context.get("tool_box")
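The hand-rolled `trace_context` dict is gone: the engine now passes only `trace_id` and `root_span_id` through `TaskResponserContext`, and each processor re-attaches to the root span. A rough sketch of that hand-off, assuming the Langfuse v2 low-level SDK (the same `langfuse.span` / `.event` calls this diff uses):

```python
from langfuse import Langfuse  # assumes the Langfuse v2 low-level SDK

langfuse = Langfuse()  # credentials come from LANGFUSE_* env vars

# Engine side: create the trace and the root span once per task run.
trace = langfuse.trace(name="xga_task_engine")
root_span = trace.span(name="run_task", metadata={"task_id": "task1"})

# Processor side: only the ids cross the boundary; langfuse.span(...)
# re-attaches to the same span, so events nest under it.
reattached = langfuse.span(trace_id=trace.id, id=root_span.id)
reattached.event(name="executing_tools_sequentially", level="DEFAULT",
                 status_message="Executing 2 tools sequentially")
```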
@@ -173,7 +173,7 @@ class TaskResponseProcessor(ABC):
         except Exception as e:
             logging.error(f"Error extracting XML chunks: {e}")
             logging.error(f"Content was: {content}")
-            langfuse.create_event(trace_context=self.trace_context, name="error_extracting_xml_chunks", level="ERROR",
+            self.root_span.event(name="error_extracting_xml_chunks", level="ERROR",
                                  status_message=(f"Error extracting XML chunks: {e}"), metadata={"content": content})
 
         return chunks
@@ -223,7 +223,7 @@ class TaskResponseProcessor(ABC):
         except Exception as e:
             logging.error(f"Error parsing XML chunk: {e}")
             logging.error(f"XML chunk was: {xml_chunk}")
-            langfuse.create_event(trace_context=self.trace_context, name="error_parsing_xml_chunk", level="ERROR",
+            self.root_span.event(name="error_parsing_xml_chunk", level="ERROR",
                                  status_message=(f"Error parsing XML chunk: {e}"), metadata={"xml_chunk": xml_chunk})
             return None
 
@@ -249,7 +249,7 @@ class TaskResponseProcessor(ABC):
 
         except Exception as e:
             logging.error(f"Error parsing XML tool calls: {e}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_parsing_xml_tool_calls", level="ERROR",
+            self.root_span.event(name="error_parsing_xml_tool_calls", level="ERROR",
                                  status_message=(f"Error parsing XML tool calls: {e}"), metadata={"content": content})
 
         return parsed_data
@@ -257,15 +257,12 @@ class TaskResponseProcessor(ABC):
 
     async def _execute_tool(self, tool_call: Dict[str, Any]) -> XGAToolResult:
         """Execute a single tool call and return the result."""
-        span = langfuse.start_span(trace_context=self.trace_context, name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
-        self.trace_context["parent_span_id"] = span.id
+        exec_tool_span = self.root_span.span(name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
         try:
             function_name = tool_call["function_name"]
             arguments = tool_call["arguments"]
 
             logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
-            langfuse.create_event(trace_context=self.trace_context, name="executing_tool", level="DEFAULT",
-                                  status_message=(f"Executing tool: {function_name} with arguments: {arguments}"))
 
             if isinstance(arguments, str):
                 try:
@@ -281,13 +278,13 @@ class TaskResponseProcessor(ABC):
                 logging.error(f"Tool function '{function_name}' not found in registry")
                 result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
             logging.info(f"Tool execution complete: {function_name} -> {result}")
-            langfuse.update_current_span(status_message="tool_executed", output=result)
+            exec_tool_span.update(status_message="tool_executed", output=result)
 
             return result
         except Exception as e:
             logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
 
-            langfuse.update_current_span(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
+            exec_tool_span.update(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
             return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
 
     async def _execute_tools(
@@ -296,8 +293,6 @@ class TaskResponseProcessor(ABC):
             execution_strategy: ToolExecutionStrategy = "sequential"
     ) -> List[Tuple[Dict[str, Any], XGAToolResult]]:
         logging.info(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}")
-        langfuse.create_event(trace_context=self.trace_context, name="executing_tools_with_strategy", level="DEFAULT",
-                              status_message=(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}"))
 
         if execution_strategy == "sequential":
             return await self._execute_tools_sequentially(tool_calls)
@@ -324,7 +319,7 @@ class TaskResponseProcessor(ABC):
             return []
         tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
         logging.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
-        langfuse.create_event(trace_context=self.trace_context, name="executing_tools_sequentially", level="DEFAULT",
+        self.root_span.event(name="executing_tools_sequentially", level="DEFAULT",
                              status_message=(f"Executing {len(tool_calls)} tools sequentially: {tool_names}"))
 
         results = []
@@ -340,22 +335,20 @@ class TaskResponseProcessor(ABC):
                 # Check if this is a terminating tool (ask or complete)
                 if tool_name in ['ask', 'complete']:
                     logging.info(f"Terminating tool '{tool_name}' executed. Stopping further tool execution.")
-                    langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_executed",
-                                          level="DEFAULT", status_message=(
-                            f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
+                    # self.root_span.event(name="terminating_tool_executed",
+                    #                      level="DEFAULT", status_message=(f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
                     break  # Stop executing remaining tools
 
             except Exception as e:
                 logging.error(f"Error executing tool {tool_name}: {str(e)}")
-                langfuse.create_event(trace_context=self.trace_context, name="error_executing_tool", level="ERROR",
+                self.root_span.event(name="error_executing_tool", level="ERROR",
                                      status_message=(f"Error executing tool {tool_name}: {str(e)}"))
                 error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
                 results.append((tool_call, error_result))
 
         logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
-        langfuse.create_event(trace_context=self.trace_context, name="sequential_execution_completed", level="DEFAULT",
-                              status_message=(
-                                  f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
+        # self.root_span.event(name="sequential_execution_completed", level="DEFAULT",
+        #                      status_message=(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
         return results
 
 
@@ -366,8 +359,8 @@ class TaskResponseProcessor(ABC):
         try:
             tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
             logging.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
-            langfuse.create_event(trace_context=self.trace_context, name="executing_tools_in_parallel", level="DEFAULT",
-                                  status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
+            # self.root_span.event(name="executing_tools_in_parallel", level="DEFAULT",
+            #                      status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
 
             # Create tasks for all tool calls
             tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
@@ -380,7 +373,7 @@ class TaskResponseProcessor(ABC):
             for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
                 if isinstance(result, Exception):
                     logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="error_executing_tool", level="ERROR", status_message=(
+                    self.root_span.event(name="error_executing_tool", level="ERROR", status_message=(
                         f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
                     # Create error result
                     error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
@@ -389,13 +382,13 @@ class TaskResponseProcessor(ABC):
                     processed_results.append((tool_call, result))
 
             logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
-            langfuse.create_event(trace_context=self.trace_context, name="parallel_execution_completed", level="DEFAULT",
-                                  status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
+            # self.root_span.event(name="parallel_execution_completed", level="DEFAULT",
+            #                      status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
             return processed_results
 
         except Exception as e:
             logging.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_in_parallel_tool_execution", level="ERROR",
+            self.root_span.event(name="error_in_parallel_tool_execution", level="ERROR",
                                  status_message=(f"Error in parallel tool execution: {str(e)}"))
             # Return error results for all tools if the gather itself fails
             return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
@@ -417,16 +410,11 @@ class TaskResponseProcessor(ABC):
             if assistant_message_id:
                 metadata["assistant_message_id"] = assistant_message_id
                 logging.info(f"Linking tool result to assistant message: {assistant_message_id}")
-                langfuse.create_event(trace_context=self.trace_context, name="linking_tool_result_to_assistant_message", level="DEFAULT",
-                                      status_message=(f"Linking tool result to assistant message: {assistant_message_id}"))
 
             # --- Add parsing details to metadata if available ---
             if parsing_details:
                 metadata["parsing_details"] = parsing_details
                 logging.info("Adding parsing_details to tool result metadata")
-                langfuse.create_event(trace_context=self.trace_context, name="adding_parsing_details_to_tool_result_metadata", level="DEFAULT",
-                                      status_message=(f"Adding parsing_details to tool result metadata"),
-                                      metadata={"parsing_details": parsing_details})
 
             # For XML and other non-native tools, use the new structured format
             # Determine message role based on strategy
@@ -471,7 +459,7 @@ class TaskResponseProcessor(ABC):
             return message_obj  # Return the modified message object
         except Exception as e:
             logging.error(f"Error adding tool result: {str(e)}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_adding_tool_result", level="ERROR",
+            self.root_span.event(name="error_adding_tool_result", level="ERROR",
                                  status_message=(f"Error adding tool result: {str(e)}"),
                                  metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
                                            "assistant_message_id": assistant_message_id,
@@ -491,7 +479,7 @@ class TaskResponseProcessor(ABC):
                 return message_obj  # Return the full message object
             except Exception as e2:
                 logging.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
-                langfuse.create_event(trace_context=self.trace_context, name="failed_even_with_fallback_message", level="ERROR",
+                self.root_span.event(name="failed_even_with_fallback_message", level="ERROR",
                                      status_message=(f"Failed even with fallback message: {str(e2)}"),
                                      metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
                                                "assistant_message_id": assistant_message_id,
@@ -599,8 +587,8 @@ class TaskResponseProcessor(ABC):
         if context.function_name in ['ask', 'complete']:
             metadata["agent_should_terminate"] = "true"
             logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
-            langfuse.create_event(trace_context=self.trace_context, name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
-                f"Marking tool status for '{context.function_name}' with termination signal."))
+            # self.root_span.event(name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
+            #     f"Marking tool status for '{context.function_name}' with termination signal."))
         # <<< END ADDED >>>
 
         return self.add_response_message(
@@ -7,7 +7,6 @@ from dataclasses import dataclass
 from datetime import datetime, timezone
 from typing import List, Dict, Any, Optional, AsyncGenerator, override, Literal
 
-from xgae.utils import langfuse
 from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext,TaskRunContinuousState,XmlAddingStrategy,ToolExecutionStrategy
 from xgae.utils.json_helpers import (
     ensure_dict, safe_json_parse,
@@ -205,7 +204,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                         __sequence += 1
                 else:
                     logging.info("XML tool call limit reached - not yielding more content chunks")
-                    langfuse.create_event(trace_context=self.trace_context, name="xml_tool_call_limit_reached", level="DEFAULT", status_message=(
+                    self.root_span.event(name="xml_tool_call_limit_reached", level="DEFAULT", status_message=(
                         f"XML tool call limit reached - not yielding more content chunks"))
 
                 # --- Process XML Tool Calls (if enabled and limit not reached) ---
@@ -317,7 +316,7 @@ class StreamTaskResponser(TaskResponseProcessor):
 
             if finish_reason == "xml_tool_limit_reached":
                 logging.info("Stopping stream processing after loop due to XML tool call limit")
-                langfuse.create_event(trace_context=self.trace_context, name="stopping_stream_processing_after_loop_due_to_xml_tool_call_limit",
+                self.root_span.event(name="stopping_stream_processing_after_loop_due_to_xml_tool_call_limit",
                                      level="DEFAULT", status_message=(
                         f"Stopping stream processing after loop due to XML tool call limit"))
                 break
@@ -352,18 +351,18 @@ class StreamTaskResponser(TaskResponseProcessor):
                     #     f"🔥 Estimated tokens – prompt: {prompt_tokens}, "
                     #     f"completion: {completion_tokens}, total: {prompt_tokens + completion_tokens}"
                     # )
-                    langfuse.create_event(trace_context=self.trace_context, name="usage_calculated_with_litellm_token_counter", level="DEFAULT",
+                    self.root_span.event(name="usage_calculated_with_litellm_token_counter", level="DEFAULT",
                                          status_message=(f"Usage calculated with litellm.token_counter"))
                 except Exception as e:
                     logging.warning(f"Failed to calculate usage: {str(e)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="failed_to_calculate_usage", level="WARNING",
+                    self.root_span.event(name="failed_to_calculate_usage", level="WARNING",
                                          status_message=(f"Failed to calculate usage: {str(e)}"))
 
             # Wait for pending tool executions from streaming phase
             tool_results_buffer = []  # Stores (tool_call, result, tool_index, context)
             if pending_tool_executions:
                 logging.info(f"Waiting for {len(pending_tool_executions)} pending streamed tool executions")
-                langfuse.create_event(trace_context=self.trace_context, name="waiting_for_pending_streamed_tool_executions", level="DEFAULT", status_message=(
+                self.root_span.event(name="waiting_for_pending_streamed_tool_executions", level="DEFAULT", status_message=(
                     f"Waiting for {len(pending_tool_executions)} pending streamed tool executions"))
                 # ... (asyncio.wait logic) ...
                 pending_tasks = [execution["task"] for execution in pending_tool_executions]
@@ -387,19 +386,19 @@ class StreamTaskResponser(TaskResponseProcessor):
                         if tool_name in ['ask', 'complete']:
                             logging.info(
                                 f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag.")
-                            langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_completed_during_streaming",
+                            self.root_span.event(name="terminating_tool_completed_during_streaming",
                                                  level="DEFAULT", status_message=(
                                     f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag."))
                             agent_should_terminate = True
 
                     else:  # Should not happen with asyncio.wait
                         logging.warning(f"Task for tool index {tool_idx} not done after wait.")
-                        langfuse.create_event(trace_context=self.trace_context, name="task_for_tool_index_not_done_after_wait", level="WARNING",
+                        self.root_span.event(name="task_for_tool_index_not_done_after_wait", level="WARNING",
                                              status_message=(
                                                  f"Task for tool index {tool_idx} not done after wait."))
                 except Exception as e:
                     logging.error(f"Error getting result for pending tool execution {tool_idx}: {str(e)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="error_getting_result_for_pending_tool_execution", level="ERROR",
+                    self.root_span.event(name="error_getting_result_for_pending_tool_execution", level="ERROR",
                                          status_message=(
                                              f"Error getting result for pending tool execution {tool_idx}: {str(e)}"))
                     context.error = e
@@ -419,7 +418,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     if tool_name in ['ask', 'complete']:
                         logging.info(
                             f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag.")
-                        langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_completed_during_streaming", level="DEFAULT",
+                        self.root_span.event(name="terminating_tool_completed_during_streaming", level="DEFAULT",
                                              status_message=(
                                                  f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag."))
                         agent_should_terminate = True
@@ -432,7 +431,7 @@ class StreamTaskResponser(TaskResponseProcessor):
             except Exception as e:
                 logging.error(
                     f"Error getting result/yielding status for pending tool execution {tool_idx}: {str(e)}")
-                langfuse.create_event(trace_context=self.trace_context, name="error_getting_result_yielding_status_for_pending_tool_execution",
+                self.root_span.event(name="error_getting_result_yielding_status_for_pending_tool_execution",
                                      level="ERROR", status_message=(
                                          f"Error getting result/yielding status for pending tool execution {tool_idx}: {str(e)}"))
                 context.error = e
@@ -451,7 +450,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                 if finish_msg_obj: yield format_for_yield(finish_msg_obj)
                 logging.info(
                     f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls")
-                langfuse.create_event(trace_context=self.trace_context, name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls",
+                self.root_span.event(name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls",
                                      level="DEFAULT", status_message=(
                                          f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls"))
 
@@ -501,7 +500,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     yield format_for_yield(yield_message)
                 else:
                     logging.error(f"Failed to save final assistant message for thread {thread_id}")
-                    langfuse.create_event(trace_context=self.trace_context, name="failed_to_save_final_assistant_message_for_thread", level="ERROR",
+                    self.root_span.event(name="failed_to_save_final_assistant_message_for_thread", level="ERROR",
                                          status_message=(f"Failed to save final assistant message for thread {thread_id}"))
                     # Save and yield an error status
                     err_content = {"role": "system", "status_type": "error",
@@ -569,7 +568,7 @@ class StreamTaskResponser(TaskResponseProcessor):
             # Populate from buffer if executed on stream
             if config.execute_on_stream and tool_results_buffer:
                 logging.info(f"Processing {len(tool_results_buffer)} buffered tool results")
-                langfuse.create_event(trace_context=self.trace_context, name="processing_buffered_tool_results", level="DEFAULT",
+                self.root_span.event(name="processing_buffered_tool_results", level="DEFAULT",
                                      status_message=(f"Processing {len(tool_results_buffer)} buffered tool results"))
                 for tool_call, result, tool_idx, context in tool_results_buffer:
                     if last_assistant_message_object: context.assistant_message_id = last_assistant_message_object[
@@ -580,7 +579,7 @@ class StreamTaskResponser(TaskResponseProcessor):
             elif final_tool_calls_to_process and not config.execute_on_stream:
                 logging.info(
                     f"Executing {len(final_tool_calls_to_process)} tools ({config.tool_execution_strategy}) after stream")
-                langfuse.create_event(trace_context=self.trace_context, name="executing_tools_after_stream", level="DEFAULT", status_message=(
+                self.root_span.event(name="executing_tools_after_stream", level="DEFAULT", status_message=(
                     f"Executing {len(final_tool_calls_to_process)} tools ({config.tool_execution_strategy}) after stream"))
                 results_list = await self._execute_tools(final_tool_calls_to_process,
                                                          config.tool_execution_strategy)
@@ -598,14 +597,14 @@ class StreamTaskResponser(TaskResponseProcessor):
                     tool_results_map[current_tool_idx] = (tc, res, context)
                 else:
                     logging.warning(f"Could not map result for tool index {current_tool_idx}")
-                    langfuse.create_event(trace_context=self.trace_context, name="could_not_map_result_for_tool_index", level="WARNING",
+                    self.root_span.event(name="could_not_map_result_for_tool_index", level="WARNING",
                                          status_message=(f"Could not map result for tool index {current_tool_idx}"))
                 current_tool_idx += 1
 
             # Save and Yield each result message
             if tool_results_map:
                 logging.info(f"Saving and yielding {len(tool_results_map)} final tool result messages")
-                langfuse.create_event(trace_context=self.trace_context, name="saving_and_yielding_final_tool_result_messages", level="DEFAULT",
+                self.root_span.event(name="saving_and_yielding_final_tool_result_messages", level="DEFAULT",
                                      status_message=(
                                          f"Saving and yielding {len(tool_results_map)} final tool result messages"))
                 for tool_idx in sorted(tool_results_map.keys()):
@@ -640,7 +639,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     else:
                         logging.error(
                             f"Failed to save tool result for index {tool_idx}, not yielding result message.")
-                        langfuse.create_event(trace_context=self.trace_context, name="failed_to_save_tool_result_for_index", level="ERROR",
+                        self.root_span.event(name="failed_to_save_tool_result_for_index", level="ERROR",
                                              status_message=(
                                                  f"Failed to save tool result for index {tool_idx}, not yielding result message."))
                         # Optionally yield error status for saving failure?
@@ -658,7 +657,7 @@ class StreamTaskResponser(TaskResponseProcessor):
             if agent_should_terminate:
                 logging.info(
                     "Agent termination requested after executing ask/complete tool. Stopping further processing.")
-                langfuse.create_event(trace_context=self.trace_context, name="agent_termination_requested", level="DEFAULT",
+                self.root_span.event(name="agent_termination_requested", level="DEFAULT",
                                      status_message="Agent termination requested after executing ask/complete tool. Stopping further processing.")
 
                 # Set finish reason to indicate termination
@@ -719,7 +718,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     logging.info("Assistant response end saved for stream (before termination)")
                 except Exception as e:
                     logging.error(f"Error saving assistant response end for stream (before termination): {str(e)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="error_saving_assistant_response_end_for_stream_before_termination",
+                    self.root_span.event(name="error_saving_assistant_response_end_for_stream_before_termination",
                                          level="ERROR", status_message=(
                                              f"Error saving assistant response end for stream (before termination): {str(e)}"))
 
@@ -775,12 +774,12 @@ class StreamTaskResponser(TaskResponseProcessor):
                     logging.info("Assistant response end saved for stream")
                 except Exception as e:
                     logging.error(f"Error saving assistant response end for stream: {str(e)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="error_saving_assistant_response_end_for_stream", level="ERROR",
+                    self.root_span.event(name="error_saving_assistant_response_end_for_stream", level="ERROR",
                                          status_message=(f"Error saving assistant response end for stream: {str(e)}"))
 
         except Exception as e:
             logging.error(f"Error processing stream: {str(e)}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_processing_stream", level="ERROR",
+            self.root_span.event(name="error_processing_stream", level="ERROR",
                                  status_message=(f"Error processing stream: {str(e)}"))
             # Save and yield error status message
 
@@ -794,12 +793,12 @@ class StreamTaskResponser(TaskResponseProcessor):
                 if err_msg_obj: yield format_for_yield(err_msg_obj)  # Yield the saved error message
                 # Re-raise the same exception (not a new one) to ensure proper error propagation
                 logging.critical(f"Re-raising error to stop further processing: {str(e)}")
-                langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="ERROR",
+                self.root_span.event(name="re_raising_error_to_stop_further_processing", level="ERROR",
                                      status_message=(f"Re-raising error to stop further processing: {str(e)}"))
             else:
                 logging.error(f"AnthropicException - Overloaded detected - Falling back to OpenRouter: {str(e)}",
                               exc_info=True)
-                langfuse.create_event(trace_context=self.trace_context, name="anthropic_exception_overloaded_detected", level="ERROR", status_message=(
+                self.root_span.event(name="anthropic_exception_overloaded_detected", level="ERROR", status_message=(
                     f"AnthropicException - Overloaded detected - Falling back to OpenRouter: {str(e)}"))
             raise  # Use bare 'raise' to preserve the original exception with its traceback
 
@@ -822,5 +821,5 @@ class StreamTaskResponser(TaskResponseProcessor):
             if end_msg_obj: yield format_for_yield(end_msg_obj)
         except Exception as final_e:
             logging.error(f"Error in finally block: {str(final_e)}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_in_finally_block", level="ERROR",
+            self.root_span.event(name="error_in_finally_block", level="ERROR",
                                  status_message=(f"Error in finally block: {str(final_e)}"))
@@ -1,6 +1,7 @@
 
 import logging
 import json
+import os
 
 from typing import List, Any, Dict, Optional, AsyncGenerator, Union, Literal
 from uuid import uuid4
@@ -8,20 +9,26 @@ from uuid import uuid4
 from xgae.engine.responser.responser_base import TaskResponserContext, TaskResponseProcessor, TaskRunContinuousState
 from xgae.engine.engine_base import XGAResponseMsgType, XGAResponseMessage, XGAToolBox, XGATaskResult
 
-from xgae.utils import langfuse, handle_error
-from xgae.utils.llm_client import LLMClient, LLMConfig
+from xgae.utils import handle_error
+from xgae.utils.setup_env import langfuse
+
+from xgae.utils.llm_client import LLMClient, LLMConfig, LangfuseMetadata
 
 from xgae.utils.json_helpers import format_for_yield
-from prompt_builder import XGAPromptBuilder
-from mcp_tool_box import XGAMcpToolBox
+from xgae.engine.prompt_builder import XGAPromptBuilder
+from xgae.engine.mcp_tool_box import XGAMcpToolBox
+
 
 class XGATaskEngine:
     def __init__(self,
                  session_id: Optional[str] = None,
                  task_id: Optional[str] = None,
                  agent_id: Optional[str] = None,
-                 trace_id: Optional[str] = None,
+                 general_tools: Optional[List[str]] = None,
+                 custom_tools: Optional[List[str]] = None,
                  system_prompt: Optional[str] = None,
+                 max_auto_run: Optional[int] = None,
+                 tool_exec_parallel: Optional[bool] = None,
                  llm_config: Optional[LLMConfig] = None,
                  prompt_builder: Optional[XGAPromptBuilder] = None,
                  tool_box: Optional[XGAToolBox] = None):
@@ -34,125 +41,100 @@ class XGATaskEngine:
         self.is_stream = self.llm_client.is_stream
 
         self.prompt_builder = prompt_builder or XGAPromptBuilder(system_prompt)
-        self.tool_box = tool_box or XGAMcpToolBox()
+        self.tool_box: XGAToolBox = tool_box or XGAMcpToolBox()
 
+        self.general_tools: List[str] = general_tools
+        self.custom_tools: List[str] = custom_tools
         self.task_response_msgs: List[XGAResponseMessage] = []
-        self.task_no = -1
-        self.task_run_id = f"{self.task_id}[{self.task_no}]"
-        self.trace_id: str = trace_id or langfuse.create_trace_id()
 
-    async def _post_init_(self, general_tools: List[str], custom_tools: List[str]) -> None:
-        await self.tool_box.load_mcp_tools_schema()
-        await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
-        general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
-        custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom_tool")
+        max_auto_run = max_auto_run if max_auto_run else int(os.getenv("MAX_AUTO_RUN", 15))
+        self.max_auto_run: int = 1 if max_auto_run <= 1 else max_auto_run
+        self.tool_exec_parallel = True if tool_exec_parallel is None else tool_exec_parallel
 
-        self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
+        self.task_no = -1
+        self.task_run_id: str = None
 
-    @classmethod
-    async def create(cls,
-                     session_id: Optional[str] = None,
-                     task_id: Optional[str] = None,
-                     agent_id: Optional[str] = None,
-                     trace_id: Optional[str] = None,
-                     system_prompt: Optional[str] = None,
-                     general_tools: Optional[List[str]] = None,
-                     custom_tools: Optional[List[str]] = None,
-                     llm_config: Optional[LLMConfig] = None,
-                     prompt_builder: Optional[XGAPromptBuilder] = None,
-                     tool_box: Optional[XGAToolBox] = None) -> 'XGATaskEngine':
-        engine: XGATaskEngine = cls(session_id=session_id,
-                                    task_id=task_id,
-                                    agent_id=agent_id,
-                                    trace_id=trace_id,
-                                    system_prompt=system_prompt,
-                                    llm_config=llm_config,
-                                    prompt_builder=prompt_builder,
-                                    tool_box=tool_box)
+        self.task_prompt: str = None
+        self.trace_id: str = None
+        self.root_span_id: str = None
+        self.root_span_name: str = None
 
-        general_tools = general_tools or ["complete", "ask"]
-        if "*" not in general_tools:
-            if "complete" not in general_tools:
-                general_tools.append("complete")
-            elif "ask" not in general_tools:
-                general_tools.append("ask")
+    async def run_task_with_final_answer(self,
+                                         task_message: Dict[str, Any],
+                                         trace_id: Optional[str] = None) -> XGATaskResult:
+        final_result: XGATaskResult = None
+        try:
+            self._init_langfuse("run_task_with_final_answer", task_message, trace_id)
+            chunks = []
+            async for chunk in self.run_task(task_message=task_message, trace_id=trace_id):
+                chunks.append(chunk)
 
-        custom_tools = custom_tools or []
-        await engine._post_init_(general_tools, custom_tools)
+            if len(chunks) > 0:
+                final_result = self._parse_final_result(chunks)
+            else:
+                final_result = XGATaskResult(type="error", content="LLM Answer is Empty")
 
-        logging.info("*"*30 + f" XGATaskEngine Task'{engine.task_id}' Initialized " + "*"*30)
-        logging.info(f"model_name={engine.model_name}, is_stream={engine.is_stream}, trace_id={engine.trace_id}")
-        logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
+            return final_result
+        finally:
+            self._end_langfuse("run_task_with_final_answer", final_result)
 
-        return engine
-
-    async def run_task_with_final_answer(self,
-                                         task_message: Dict[str, Any],
-                                         max_auto_run: int = 25,
-                                         trace_id: Optional[str] = None) -> XGATaskResult:
-        chunks = []
-        async for chunk in self.run_task(task_message=task_message, max_auto_run=max_auto_run, trace_id=trace_id):
-            chunks.append(chunk)
-        if len(chunks) > 0:
-            final_result = self._parse_final_result(chunks)
-        else:
-            final_result = XGATaskResult(type="error", content="LLM Answer is Empty")
-        return final_result
 
     async def run_task(self,
                        task_message: Dict[str, Any],
-                       max_auto_run: int = 25,
                        trace_id: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
         try:
-            self.trace_id = trace_id or self.trace_id or langfuse.create_trace_id()
-
-            self.task_no += 1
-            self.task_run_id = f"{self.task_id}[{self.task_no}]"
+            await self._init_task()
+            self._init_langfuse("run_task", task_message, trace_id)
 
             self.add_response_message(type="user", content=task_message, is_llm_message=True)
 
-            continuous_state: TaskRunContinuousState = {
-                "accumulated_content": "",
-                "auto_continue_count": 0,
-                "auto_continue": False if max_auto_run <= 1 else True,
-                "max_auto_run": max_auto_run
-            }
-            async for chunk in self._run_task_auto(continuous_state):
+            async for chunk in self._run_task_auto():
                 yield chunk
         finally:
             await self.tool_box.destroy_task_tool_box(self.task_id)
+            self._end_langfuse("run_task")
 
-    async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
-        llm_messages = [{"role": "system", "content": self.task_prompt}]
-        cxt_llm_contents = self.get_history_llm_messages()
-        llm_messages.extend(cxt_llm_contents)
 
-        partial_content = continuous_state.get('accumulated_content', '')
-        if partial_content:
-            temp_assistant_message = {
-                "role": "assistant",
-                "content": partial_content
-            }
-            llm_messages.append(temp_assistant_message)
+    async def _init_task(self) -> None:
+        self.task_no = self.task_no + 1
+        self.task_run_id = f"{self.task_id}[{self.task_no}]"
 
-        llm_response = await self.llm_client.create_completion(llm_messages)
-        response_processor = self._create_response_processer()
+        general_tools = self.general_tools or ["complete", "ask"]
+        if "*" not in general_tools:
+            if "complete" not in general_tools:
+                general_tools.append("complete")
+            elif "ask" not in general_tools:
+                general_tools.append("ask")
 
-        async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
-            self._logging_reponse_chunk(chunk)
-            yield chunk
+        custom_tools = self.custom_tools or []
+        if isinstance(self.tool_box, XGAMcpToolBox):
+            await self.tool_box.load_mcp_tools_schema()
+
+        await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
+        general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
+        custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom_tool")
+
+        self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
+
+        logging.info("*" * 30 + f" XGATaskEngine Task'{self.task_id}' Initialized " + "*" * 30)
+        logging.info(f"model_name={self.model_name}, is_stream={self.is_stream}, trace_id={self.trace_id}")
+        logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
 
-    async def _run_task_auto(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
-        max_auto_run = continuous_state['max_auto_run']
-        max_auto_run = max_auto_run if max_auto_run > 0 else 1
 
+    async def _run_task_auto(self) -> AsyncGenerator[Dict[str, Any], None]:
         def update_continuous_state(_auto_continue_count, _auto_continue):
            continuous_state["auto_continue_count"] = _auto_continue_count
            continuous_state["auto_continue"] = _auto_continue
 
+        continuous_state: TaskRunContinuousState = {
+            "accumulated_content": "",
+            "auto_continue_count": 0,
+            "auto_continue": False if self.max_auto_run <= 1 else True
+        }
+
        auto_continue_count = 0
        auto_continue = True
-        while auto_continue and auto_continue_count < max_auto_run:
+        while auto_continue and auto_continue_count < self.max_auto_run:
            auto_continue = False
 
            try:
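`max_auto_run` moved from a per-call argument into the constructor, with an env fallback. A toy restatement of the new defaulting, runnable on its own (the `MAX_AUTO_RUN` variable and the 15 fallback come straight from the diff):

```python
import os

def resolve_max_auto_run(max_auto_run=None):
    # None (and 0, which is falsy) fall back to the MAX_AUTO_RUN env var,
    # defaulting to 15; anything <= 1 is clamped to a single run.
    max_auto_run = max_auto_run if max_auto_run else int(os.getenv("MAX_AUTO_RUN", 15))
    return 1 if max_auto_run <= 1 else max_auto_run

assert resolve_max_auto_run(8) == 8
assert resolve_max_auto_run(0) == int(os.getenv("MAX_AUTO_RUN", 15))
assert resolve_max_auto_run(-3) == 1
```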
@@ -180,7 +162,7 @@ class XGATaskEngine:
                     auto_continue = True
                     auto_continue_count += 1
                     update_continuous_state(auto_continue_count, auto_continue)
-                    logging.info(f"run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{max_auto_run})")
+                    logging.info(f"run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{self.max_auto_run})")
             except Exception as parse_error:
                 logging.error(f"run_task_auto: Error in parse chunk: {str(parse_error)}")
                 content = {"role": "system", "status_type": "error", "message": "Parse response chunk Error"}
@@ -194,6 +176,29 @@ class XGATaskEngine:
                 error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
                 yield format_for_yield(error_msg)
 
+
+    async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
+        llm_messages = [{"role": "system", "content": self.task_prompt}]
+        cxt_llm_contents = self.get_history_llm_messages()
+        llm_messages.extend(cxt_llm_contents)
+
+        partial_content = continuous_state.get('accumulated_content', '')
+        if partial_content:
+            temp_assistant_message = {
+                "role": "assistant",
+                "content": partial_content
+            }
+            llm_messages.append(temp_assistant_message)
+
+        llm_count = continuous_state.get("auto_continue_count")
+        langfuse_metadata = self._create_llm_langfuse_meta(llm_count)
+        llm_response = await self.llm_client.create_completion(llm_messages, langfuse_metadata)
+        response_processor = self._create_response_processer()
+
+        async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
+            self._logging_reponse_chunk(chunk)
+            yield chunk
+
     def _parse_final_result(self, chunks: List[Dict[str, Any]]) -> XGATaskResult:
         final_result: XGATaskResult = None
         try:
@@ -245,6 +250,7 @@ class XGATaskEngine:
 
         return final_result
 
+
     def add_response_message(self, type: XGAResponseMsgType,
                              content: Union[Dict[str, Any], List[Any], str],
                              is_llm_message: bool,
@@ -289,6 +295,41 @@ class XGATaskEngine:
 
         return response_llm_contents
 
+
+    def _create_llm_langfuse_meta(self, llm_count: int) -> LangfuseMetadata:
+        generation_name = f"xga_task_engine_llm_completion[{self.task_no}]({llm_count})"
+        generation_id = f"{self.task_run_id}({llm_count})"
+        return LangfuseMetadata(
+            generation_name=generation_name,
+            generation_id=generation_id,
+            existing_trace_id=self.trace_id,
+            session_id=self.session_id,
+        )
+
+    def _init_langfuse(self,
+                       root_span_name: str,
+                       task_message: Dict[str, Any],
+                       trace_id: Optional[str] = None):
+
+        if self.root_span_id is None:
+            trace = None
+            if trace_id:
+                self.trace_id = trace_id
+                trace = langfuse.trace(id=trace_id)
+            else:
+                trace = langfuse.trace(name="xga_task_engine")
+                self.trace_id = trace.id
+
+            span = trace.span(name=root_span_name, input=task_message, metadata={"task_id": self.task_id})
+            self.root_span_id = span.id
+            self.root_span_name = root_span_name
+
+    def _end_langfuse(self, root_span_name: str, output: Optional[XGATaskResult] = None):
+        if self.root_span_id and self.root_span_name == root_span_name:
+            langfuse.span(trace_id=self.trace_id, id=self.root_span_id).end(output=output)
+            self.root_span_id = None
+            self.root_span_name = None
+
     def _create_response_processer(self) -> TaskResponseProcessor:
         response_context = self._create_response_context()
         is_stream = response_context.get("is_stream", False)
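The init/end pair is guarded so that the nested `run_task` call inside `run_task_with_final_answer` neither opens a second root span nor closes the outer one. A stripped-down illustration of that guard (a hypothetical standalone class, not part of the package):

```python
class SpanGuard:
    def __init__(self):
        self.root_span_name = None

    def init(self, name):
        if self.root_span_name is None:  # first caller wins
            self.root_span_name = name

    def end(self, name):
        if self.root_span_name == name:  # only the opener may close
            self.root_span_name = None

g = SpanGuard()
g.init("run_task_with_final_answer")
g.init("run_task")                    # no-op: a root span already exists
g.end("run_task")                     # no-op: name does not match the opener
g.end("run_task_with_final_answer")   # closes the root span
```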
@@ -304,12 +345,14 @@ class XGATaskEngine:
             "is_stream": self.is_stream,
             "task_id": self.task_id,
             "task_run_id": self.task_run_id,
+            "task_no": self.task_no,
             "trace_id": self.trace_id,
+            "root_span_id": self.root_span_id,
             "model_name": self.model_name,
             "max_xml_tool_calls": 0,
             "add_response_msg_func": self.add_response_message,
             "tool_box": self.tool_box,
-            "tool_execution_strategy": "sequential",  # "parallel",
+            "tool_execution_strategy": "parallel" if self.tool_exec_parallel else "sequential",
             "xml_adding_strategy": "user_message",
         }
         return response_context
@@ -338,23 +381,17 @@ if __name__ == "__main__":
 
     async def main():
         tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-        system_prompt = read_file("templates/scp_test_prompt.txt")
-        engine = await XGATaskEngine.create(tool_box=tool_box,
-                                            general_tools=[],
-                                            custom_tools=["bomc_fault.*"],
-                                            llm_config=LLMConfig(stream=False),
-                                            system_prompt=system_prompt)
-
-        final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "locate the 10.0.1.1 fault"}, max_auto_run=8)
+        system_prompt = read_file("templates/example_user_prompt.txt")
+        engine = XGATaskEngine(tool_box=tool_box,
+                               general_tools=[],
+                               custom_tools=["*"],
+                               llm_config=LLMConfig(stream=False),
+                               system_prompt=system_prompt,
+                               max_auto_run=8)
+
+        final_result = await engine.run_task_with_final_answer(task_message={"role": "user",
+                                                                             "content": "locate 10.0.0.1 fault and solution"})
         print("FINAL RESULT:", final_result)
 
-        # ==== test streaming response ========
-        #chunks = []
-        # async for chunk in engine.run_task(task_message={"role": "user", "content": "locate the fault on 10.0.0.1"}, max_auto_run=8):
-        #     print(chunk)
 
-        # ==== test no tool call ========
-        # engine = await XGATaskEngine.create(llm_config=LLMConfig(stream=False))
-        # final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "1+1"}, max_auto_run=2)
-        # print("FINAL RESULT:", final_result)
     asyncio.run(main())
@@ -0,0 +1,48 @@
+from typing import Annotated, Optional
+from pydantic import Field
+
+from mcp.server.fastmcp import FastMCP
+
+from xgae.engine.engine_base import XGAToolResult
+
+mcp = FastMCP(name="XGAE Message Tools")
+
+@mcp.tool(
+    description="""A special tool to indicate you have completed all tasks and are about to enter complete state. Use ONLY when: 1) All tasks in todo.md are marked complete [x], 2) The user's original request has been fully addressed, 3) There are no pending actions or follow-ups required, 4) You've delivered all final outputs and results to the user. IMPORTANT: This is the ONLY way to properly terminate execution. Never use this tool unless ALL tasks are complete and verified. Always ensure you've provided all necessary outputs and references before using this tool. Include relevant attachments when the completion relates to specific files or resources."""
+)
+async def complete(task_id: str,
+                   text: Annotated[Optional[str], Field(default=None,
+                       description="Completion summary. Include: 1) Task summary 2) Key deliverables 3) Next steps 4) Impact achieved")],
+                   attachments: Annotated[Optional[str], Field(default=None,
+                       description="Comma-separated list of final outputs. Use when: 1) Completion relates to files 2) User needs to review outputs 3) Deliverables in files")]
+                   ):
+    print(f"<XGAETools-complete>: task_id={task_id}, text={text}, attachments={attachments}")
+    return XGAToolResult(success=True, output=str({"status": "complete"}))
+
+
+@mcp.tool(
+    description="""Ask user a question and wait for response. Use for: 1) Requesting clarification on ambiguous requirements, 2) Seeking confirmation before proceeding with high-impact changes, 3) Gathering additional information needed to complete a task, 4) Offering options and requesting user preference, 5) Validating assumptions when critical to task success, 6) When encountering unclear or ambiguous results during task execution, 7) When tool results don't match expectations, 8) For natural conversation and follow-up questions, 9) When research reveals multiple entities with the same name, 10) When user requirements are unclear or could be interpreted differently. IMPORTANT: Use this tool when user input is essential to proceed. Always provide clear context and options when applicable. Use natural, conversational language that feels like talking with a helpful friend. Include relevant attachments when the question relates to specific files or resources. CRITICAL: When you discover ambiguity (like multiple people with the same name), immediately stop and ask for clarification rather than making assumptions."""
+)
+async def ask(task_id: str,
+              text: Annotated[str, Field(
+                  description="Question text to present to user. Include: 1) Clear question/request 2) Context why input is needed 3) Available options 4) Impact of choices 5) Relevant constraints")],
+              attachments: Annotated[Optional[str], Field(default=None,
+                  description="Comma-separated list of files/URLs to attach. Use when: 1) Question relates to files/configs 2) User needs to review content 3) Options documented in files 4) Supporting evidence needed")]
+              ):
+    print(f"<XGAETools-ask>: task_id={task_id}, text={text}, attachments={attachments}")
+    return XGAToolResult(success=True, output=str({"status": "Awaiting user response..."}))
+
+@mcp.tool(
+    description="end task, destroy sandbox"
+)
+async def end_task(task_id: str):
+    print(f"<XGAETools-end_task> task_id: {task_id}")
+
+
+
+def main():
+    #print("="*20 + " XGAE Message Tools Sever Started in Stdio mode " + "="*20)
+    mcp.run(transport="stdio")
+
+if __name__ == "__main__":
+    main()
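Assuming FastMCP's `@mcp.tool` decorator returns the wrapped coroutine unchanged (as in recent `mcp` SDKs), the new tools can be smoke-tested without starting the stdio server; a hypothetical check, with the module name assumed:

```python
import asyncio
# from xgae_message_tools import ask, complete  # hypothetical module name

async def smoke():
    # The tools are plain async functions, so they can be awaited directly.
    done = await complete(task_id="t1", text="all tasks finished")
    reply = await ask(task_id="t1", text="Proceed with cleanup?")
    print(done)   # XGAToolResult(success=True, output="{'status': 'complete'}")
    print(reply)  # XGAToolResult(success=True, output="{'status': 'Awaiting user response...'}")

asyncio.run(smoke())
```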
xgae/utils/__init__.py CHANGED
@@ -1,13 +1,9 @@
 import logging
 
-from .setup_env import setup_langfuse, setup_logging
-
-setup_logging()
-langfuse = setup_langfuse()
-
 def handle_error(e: Exception) -> None:
     import traceback
 
     logging.error("An error occurred: %s", str(e))
     logging.error("Traceback details:\n%s", traceback.format_exc())
     raise (e) from e
+
xgae/utils/llm_client.py CHANGED
@@ -9,19 +9,29 @@ from typing import Union, Dict, Any, Optional, List, TypedDict
9
9
  from litellm.utils import ModelResponse, CustomStreamWrapper
10
10
  from openai import OpenAIError
11
11
 
12
+ from xgae.utils.setup_env import langfuse
13
+
12
14
  class LLMConfig(TypedDict, total=False):
13
- model: str
14
- model_name: str
15
- model_id: str
16
- api_key: str
17
- api_base: str
18
- temperature: float
19
- max_tokens: int
20
- stream: bool
21
- enable_thinking: bool
22
- reasoning_effort: str
23
- response_format: str
24
- top_p: int
15
+ model: str # Optional. Model to use; overrides .env LLM_MODEL
16
+ model_name: str # Optional. Display name of the model; falls back to model if empty
17
+ model_id: str # Optional. ARN for Bedrock inference profiles; default is None
18
+ api_key: str # Optional. API key; overrides .env LLM_API_KEY or the OS env variable
19
+ api_base: str # Optional. API base URL; overrides .env LLM_API_BASE
20
+ temperature: float # Optional. Sampling temperature (0-1); overrides .env LLM_TEMPERATURE
21
+ max_tokens: int # Optional. Maximum tokens in the response; overrides .env LLM_MAX_TOKENS
22
+ stream: bool # Optional. Whether to stream the response; default is True
23
+ enable_thinking: bool # Optional. Whether to enable thinking; default is False
24
+ reasoning_effort: str # Optional. Level of reasoning effort; default is 'low'
25
+ response_format: str # Optional. Desired format for the response; default is None
26
+ top_p: int # Optional. Top-p sampling parameter; default is None
27
+
28
+
29
+ class LangfuseMetadata(TypedDict, total=False):
30
+ generation_name: str
31
+ generation_id: str
32
+ existing_trace_id: str
33
+ session_id: str
34
+
25
35
 
26
36
  class LLMError(Exception):
27
37
  """Base exception for LLM-related errors."""
@@ -31,26 +41,15 @@ class LLMClient:
31
41
  RATE_LIMIT_DELAY = 30
32
42
  RETRY_DELAY = 0.1
33
43
 
34
- def __init__(self, llm_config: LLMConfig=None) -> None:
35
- """
36
- Arg: llm_config (Optional[Dict[str, Any]], optional)
37
- model: Override default model to use, default set by .env LLM_MODEL
38
- model_name: Optional Name of the model to use , use model if empty
39
- model_id: Optional ARN for Bedrock inference profiles, default is None
40
- api_key: Optional API key, Override .env LLM_API_KEY or OS environment variable
41
- api_base: Optional API base URL, Override .env LLM_API_BASE
42
- temperature: Optional Sampling temperature (0-1), Override .env LLM_TEMPERATURE
43
- max_tokens: Optional Maximum tokens in the response, Override .env LLM_MAX_TOKENS
44
- stream: Optional whether to stream the response, default is True
45
- response_format: Optional desired format for the response, default is None
46
- enable_thinking: Optional whether to enable thinking, default is False
47
- reasoning_effort: Optional level of reasoning effort, default is ‘low’
48
- top_p: Optional Top-p sampling parameter, default is None
49
- """
50
- llm_config = llm_config or LLMConfig()
44
+ langfuse_inited = False
45
+ langfuse_enabled = False
46
+
47
+ def __init__(self, llm_config: Optional[LLMConfig] = None):
51
48
  litellm.modify_params = True
52
49
  litellm.drop_params = True
50
+ self._init_langfuse()
53
51
 
52
+ llm_config = llm_config or LLMConfig()
54
53
  self.max_retries = int(os.getenv("LLM_MAX_RETRIES", 1))
55
54
 
56
55
  env_llm_model = os.getenv("LLM_MODEL", "openai/qwen3-235b-a22b")
@@ -82,6 +81,20 @@ class LLMClient:
82
81
  self.lite_llm_params = self._prepare_llm_params(llm_config_params)
83
82
  logging.info(f"📡 LLMClient initialed : model={self.model_name}, is_stream={self.is_stream}, enable thinking={self.lite_llm_params['enable_thinking']}")
84
83
 
84
+ @staticmethod
85
+ def _init_langfuse():
86
+ if not LLMClient.langfuse_inited:
87
+ LLMClient.langfuse_inited = True
88
+ if langfuse and langfuse.enabled:
89
+ litellm.success_callback = ["langfuse"]
90
+ litellm.failure_callback = ["langfuse"]
91
+ LLMClient.langfuse_enabled = True
92
+ logging.info("=== LiteLLM Langfuse is enable !")
93
+ else:
94
+ LLMClient.langfuse_enabled = False
95
+ logging.warning("*** LiteLLM Langfuse is disable !")
96
+
97
+
85
98
 
86
99
  def _prepare_llm_params(self, llm_config_params: Dict[str, Any]) -> Dict[str, Any]:
87
100
  prepared_llm_params = llm_config_params.copy()
@@ -205,9 +218,10 @@ class LLMClient:
205
218
  logging.debug(f"LLMClient: Waiting {delay} seconds before retry llm completion...")
206
219
  await asyncio.sleep(delay)
207
220
 
208
-
209
- async def create_completion(self, messages: List[Dict[str, Any]]) -> Union[ModelResponse, CustomStreamWrapper]:
221
+ async def create_completion(self, messages: List[Dict[str, Any]], langfuse_metadata: Optional[LangfuseMetadata]=None) -> Union[ModelResponse, CustomStreamWrapper]:
210
222
  complete_params = self._prepare_complete_params(messages)
223
+ if LLMClient.langfuse_enabled and langfuse_metadata:
224
+ complete_params["metadata"] = langfuse_metadata
211
225
 
212
226
  last_error = None
213
227
  for attempt in range(self.max_retries):
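Putting the pieces together, a sketch of the new call path (assuming a configured .env; run inside an async function): the metadata is only forwarded when the class-level Langfuse flag is set, so callers can pass it unconditionally.

    # Sketch only: LLMClient reads model/API settings from .env on construction.
    client = LLMClient(LLMConfig(stream=False))
    meta = LangfuseMetadata(generation_name="demo", session_id="task1")
    response = await client.create_completion(
        [{"role": "user", "content": "1+1="}],
        langfuse_metadata=meta,
    )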
@@ -228,8 +242,10 @@ class LLMClient:
228
242
  if __name__ == "__main__":
229
243
  async def llm_completion():
230
244
  llm_client = LLMClient(LLMConfig(stream=False))
231
- messages = [{"role": "user", "content": "今天是2025年8月15日,北京本周每天温度"}]
232
- response = await llm_client.create_completion(messages)
245
+ messages = [{"role": "user", "content": "1+1="}]
246
+ trace_id = langfuse.trace(name="xgae_litellm_test").trace_id
247
+
248
+ response = await llm_client.create_completion(messages, LangfuseMetadata(existing_trace_id=trace_id))
233
249
  if llm_client.is_stream:
234
250
  async for chunk in response:
235
251
  choices = chunk.get("choices", [{}])
@@ -242,6 +258,7 @@ if __name__ == "__main__":
242
258
  else:
243
259
  print(response.choices[0].message.content)
244
260
 
261
+
245
262
  asyncio.run(llm_completion())
246
263
 
247
264
 
xgae/utils/setup_env.py CHANGED
@@ -1,22 +1,27 @@
1
1
  import logging
2
2
  import os
3
3
 
4
+ from dotenv import load_dotenv
4
5
  from langfuse import Langfuse
5
6
 
6
- def setup_logging() -> None:
7
+ load_dotenv()
8
+
9
+ def setup_logging():
10
+ log_enable = os.getenv("LOG_ENABLE", "true").lower() in ("1", "true", "yes")
11
+ if not log_enable:
12
+ return
13
+
7
14
  import colorlog
8
- from dotenv import load_dotenv
9
- load_dotenv()
10
15
 
11
16
  env_log_level = os.getenv("LOG_LEVEL", "INFO")
12
- env_log_file = os.getenv("LOG_FILE", "log/xga.log")
17
+ log_file = os.getenv("LOG_FILE", "log/xga.log")
13
18
  log_level = getattr(logging, env_log_level.upper(), logging.INFO)
14
19
 
15
- log_dir = os.path.dirname(env_log_file)
20
+ log_dir = os.path.dirname(log_file)
16
21
  if log_dir and not os.path.exists(log_dir):
17
22
  os.makedirs(log_dir, exist_ok=True)
18
23
  else:
19
- os.remove(env_log_file)
24
+ if os.path.exists(log_file): os.remove(log_file)
20
25
 
21
26
  logger = logging.getLogger()
22
27
  for handler in logger.handlers[:]:
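The explicit string comparison above matters because bool() over an env string is truthy for any non-empty value, so bool(os.getenv("LOG_ENABLE", True)) would treat LOG_ENABLE=false as enabled. A reusable sketch of the pattern (the env_flag helper is illustrative, not part of the package):

    import os

    def env_flag(name: str, default: bool = True) -> bool:
        # bool("false") is True, so parse common spellings explicitly.
        raw = os.getenv(name)
        if raw is None:
            return default
        return raw.strip().lower() in ("1", "true", "yes", "on")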
@@ -43,7 +48,7 @@ def setup_logging() -> None:
43
48
  console_handler = logging.StreamHandler()
44
49
  console_handler.setFormatter(console_formatter)
45
50
 
46
- file_handler = logging.FileHandler(env_log_file, encoding='utf-8')
51
+ file_handler = logging.FileHandler(log_file, encoding='utf-8')
47
52
  file_handler.setFormatter(file_formatter)
48
53
 
49
54
  logger.addHandler(console_handler)
@@ -51,28 +56,25 @@ def setup_logging() -> None:
51
56
 
52
57
  logger.setLevel(log_level)
53
58
 
54
- logging.info(f"Logger is initialized, log_level={env_log_level}, log_file={env_log_file}")
59
+ logging.info(f"📡 XGAE_LOGGING is initialized, log_level={env_log_level}, log_file={log_file}")
55
60
 
61
+ setup_logging()
56
62
 
57
63
  def setup_langfuse() -> Langfuse:
58
- _langfuse = None
59
64
  env_public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
60
65
  env_secret_key = os.getenv("LANGFUSE_SECRET_KEY")
61
66
  env_host = os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
62
67
  if env_public_key and env_secret_key:
63
- _langfuse = Langfuse(tracing_enabled=True,
68
+ _langfuse = Langfuse(enabled=True,
64
69
  public_key=env_public_key,
65
70
  secret_key=env_secret_key,
66
71
  host=env_host)
67
- logging.info("Langfuse initialized Successfully by Key !")
72
+
73
+ logging.info("📡 XGAE_LANGFUSE initialized Successfully by Key !")
68
74
  else:
69
- _langfuse = Langfuse(tracing_enabled=False)
70
- logging.warning("Not set key, Langfuse is disabled!")
75
+ _langfuse = Langfuse(enabled=False)
76
+ logging.warning("📡 XGAE_LANGFUSE Not set key, Langfuse is disabled!")
71
77
 
72
78
  return _langfuse
73
79
 
74
-
75
- if __name__ == "__main__":
76
- from xgae.utils import langfuse
77
- trace_id = langfuse.create_trace_id()
78
- logging.warning(f"trace_id={trace_id}")
80
+ langfuse = setup_langfuse()
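With setup_env now owning both import-time side effects (logging setup and the module-level client above), importers get a ready singleton. A usage sketch against the pinned v2 client (langfuse==2.60.9), mirroring how llm_client.py uses it:

    from xgae.utils.setup_env import langfuse

    # Create a trace and reuse its id to correlate later LLM generations.
    trace = langfuse.trace(name="xgae_demo")
    print(trace.trace_id)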
@@ -0,0 +1,11 @@
1
+ Metadata-Version: 2.4
2
+ Name: xgae
3
+ Version: 0.1.8
4
+ Summary: Extreme General Agent Engine
5
+ Requires-Python: >=3.13
6
+ Requires-Dist: colorlog==6.9.0
7
+ Requires-Dist: langchain-mcp-adapters==0.1.9
8
+ Requires-Dist: langfuse==2.60.9
9
+ Requires-Dist: langgraph==0.6.5
10
+ Requires-Dist: litellm==1.74.15
11
+ Requires-Dist: mcp==1.13.0
@@ -0,0 +1,19 @@
1
+ xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ xgae/engine/engine_base.py,sha256=MExJxXHkufv_a-n9tm3FZAbJyxVAwEkRC3C_WAGdu4Q,1746
3
+ xgae/engine/mcp_tool_box.py,sha256=6bsORGK2HE-NS8vWNnbbgUQ9uGr_MI7JGO9j_FYKUKA,9903
4
+ xgae/engine/prompt_builder.py,sha256=8_rNRJksn2QLV_K98S0x0qNeHcmxhU0kB_53IZJTGOU,4366
5
+ xgae/engine/task_engine.py,sha256=kOntdzVtTjjakIhcBNK5vBMiBuAXPqB4R5ustY-BNfU,18931
6
+ xgae/engine/responser/non_stream_responser.py,sha256=tXvkGHFGm2oO0IE8Bz9DFDwqemBkO92Nb6MbX1ukHeE,6352
7
+ xgae/engine/responser/responser_base.py,sha256=FdR0yX7VfU5kW-4JmTXBfnn9AwyX1EbOAnslkC7Vcyg,29874
8
+ xgae/engine/responser/stream_responser.py,sha256=oPGtrT1nedGMjiBAwPzUlu6Z_rPWeVSODC1xQ6D8cTY,52055
9
+ xgae/tools/without_general_tools_app.py,sha256=QknIF4OW9xvOad8gx-F_sCBwQYXqMalnNFvYvZXkQ_I,3789
10
+ xgae/utils/__init__.py,sha256=GPubcIs2XFPiWKnuCpevAtYEmVWKJuXlmGkmsH9qoXA,219
11
+ xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
12
+ xgae/utils/llm_client.py,sha256=Y-o26VW1MOhJYsWJ0zR4u_YXsHSEbvVPY6r90zLQJXU,13213
13
+ xgae/utils/misc.py,sha256=EK94YesZp8AmRUqWfN-CjTxyEHPWdIIWpFNO17dzm9g,915
14
+ xgae/utils/setup_env.py,sha256=P_p74q3nroBdTkAElfGr4QLm4fu7ZRP0R9BMGNUL010,2352
15
+ xgae/utils/xml_tool_parser.py,sha256=EJ6BjpD4CSdmS_LqViUJ6P8H9GY2R1e4Dh8rLCR6nSE,7474
16
+ xgae-0.1.8.dist-info/METADATA,sha256=mk0D1208wUJnbIZxyteCAjE2tlKtQixumHo0mcqD3RI,309
17
+ xgae-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
18
+ xgae-0.1.8.dist-info/entry_points.txt,sha256=rhQ9Vksnu8nA78lPTjiJxOCZ5k6sH6s5YNMR68y7C-A,73
19
+ xgae-0.1.8.dist-info/RECORD,,
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ xgae-tools = xgae.tools.without_general_tools_app:main
@@ -1,11 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: xgae
3
- Version: 0.1.6
4
- Summary: Extreme General Agent Engine
5
- Requires-Python: >=3.13
6
- Requires-Dist: colorlog>=6.9.0
7
- Requires-Dist: langchain-mcp-adapters>=0.1.4
8
- Requires-Dist: langfuse>=2.60.5
9
- Requires-Dist: langgraph>=0.3.21
10
- Requires-Dist: litellm>=1.74.8
11
- Requires-Dist: mcp>=1.12.1
@@ -1,17 +0,0 @@
1
- xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- xgae/engine/engine_base.py,sha256=ySERuLy1YWsf-3s0NFKcyTnXQ4g69wR-cQhtnG0OFmU,1747
3
- xgae/engine/mcp_tool_box.py,sha256=6mdvu9-aquyLJEwebTtpa_bfGmgT1jPszKE90NIpR5c,9852
4
- xgae/engine/prompt_builder.py,sha256=ygFAIc4p3opIMyl6g1JeBuSiMjNVxwRloKeF2eX8R5I,4354
5
- xgae/engine/task_engine.py,sha256=xxAWtPfKgSpf6L7wOc243U-7YP8AC2WYoCI-FUdDpOc,18132
6
- xgae/engine/responser/non_stream_responser.py,sha256=QEFE4JGYVaIbFeMUMJa1Mt1uBblU_hAOywAhyp9V1k4,6634
7
- xgae/engine/responser/responser_base.py,sha256=aHKJ880B1ezfBWzyHoOSNVDb-CJY4ujH2MGm61aJLy8,31468
8
- xgae/engine/responser/stream_responser.py,sha256=5KzCHApiPplZ-zN_sbbEbSvj2rtvKWBshJKe_-x7RDI,52927
9
- xgae/utils/__init__.py,sha256=jChvD-p_p5gsrCZUVYPUGJs4CS9gIdNFcSOpkRpcM4Y,317
10
- xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
11
- xgae/utils/llm_client.py,sha256=mgzn8heUyRm92HTLEYGdfsGEpFtD-xLFr39P98_JP0s,12402
12
- xgae/utils/misc.py,sha256=EK94YesZp8AmRUqWfN-CjTxyEHPWdIIWpFNO17dzm9g,915
13
- xgae/utils/setup_env.py,sha256=Nc0HCQOnK-EGNLTWCQ9-iYysNRdIvwGhcHdqpNeV910,2407
14
- xgae/utils/xml_tool_parser.py,sha256=EJ6BjpD4CSdmS_LqViUJ6P8H9GY2R1e4Dh8rLCR6nSE,7474
15
- xgae-0.1.6.dist-info/METADATA,sha256=Q5OiPe5W3H7ym2TDPaM1x3k6jSTIol3QDyWI0dsQetw,309
16
- xgae-0.1.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
17
- xgae-0.1.6.dist-info/RECORD,,