xgae 0.1.7__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xgae might be problematic.

xgae/engine/engine_base.py CHANGED
@@ -55,4 +55,4 @@ class XGAToolBox(ABC):
 
     @abstractmethod
     async def call_tool(self, task_id: str, tool_name: str, args: Optional[Dict[str, Any]] = None) -> XGAToolResult:
-        pass
+        pass
xgae/engine/mcp_tool_box.py CHANGED
@@ -8,7 +8,6 @@ from langchain_mcp_adapters.client import MultiServerMCPClient
 from langchain_mcp_adapters.tools import load_mcp_tools
 
 from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
-from xgae.utils import langfuse
 
 class XGAMcpToolBox(XGAToolBox):
     GENERAL_MCP_SERVER_NAME = "xga_general"
@@ -194,8 +193,8 @@ if __name__ == "__main__":
 
     async def main():
         task_id = "task1"
-        #mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-        mcp_tool_box = XGAMcpToolBox()
+        mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+        #mcp_tool_box = XGAMcpToolBox()
         await mcp_tool_box.load_mcp_tools_schema()
         await mcp_tool_box.creat_task_tool_box(task_id=task_id, general_tools=["*"], custom_tools=["bomc_fault.*"])
         tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "general_tool")
@@ -210,9 +209,6 @@ if __name__ == "__main__":
            print(asdict(tool_schema))
            print()
 
-        result = await mcp_tool_box.call_tool(task_id=task_id, tool_name="web_search", args={"task_id": task_id, "query": "查询天津天气"})
-        print(f"call web_search result: {result}")
-
         result = await mcp_tool_box.call_tool(task_id=task_id, tool_name="complete", args={"task_id": task_id})
         print(f"call complete result: {result}")
 
xgae/engine/responser/non_stream_responser.py CHANGED
@@ -3,7 +3,6 @@ import logging
 from typing import List, Dict, Any, AsyncGenerator, override,Optional
 
 from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
-from xgae.utils import langfuse
 from xgae.utils.json_helpers import format_for_yield
 
 class NonStreamTaskResponser(TaskResponseProcessor):
@@ -16,6 +15,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
         llm_content = ""
         parsed_xml_data = []
         finish_reason = None
+        llm_count = continuous_state.get("auto_continue_count")
 
         try:
             # Extract finish_reason, content, tool calls
@@ -24,7 +24,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
             finish_reason = llm_response.choices[0].finish_reason
             logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
 
-            langfuse.create_event(trace_context=self.trace_context, name="non_stream_processor_start", level="DEFAULT",
+            self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
                                   status_message=(f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}"))
 
             response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
@@ -90,7 +90,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
 
         except Exception as e:
             logging.error(f"NonStreamTask: Error processing non-streaming response: {llm_content}")
-            langfuse.create_event(trace_context=self.trace_context, name="error_processing_non_streaming_response", level="ERROR",
+            self.root_span.event(name="error_processing_non_streaming_response", level="ERROR",
                                   status_message=(f"Error processing non-streaming response: {str(e)}"))
 
             content = {"role": "system", "status_type": "error", "message": str(e)}
@@ -100,7 +100,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
 
             # Re-raise the same exception (not a new one) to ensure proper error propagation
             logging.critical(f"NonStreamTask: Re-raising error to stop further processing: {str(e)}")
-            langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="CRITICAL",
+            self.root_span.event(name="re_raising_error_to_stop_further_processing", level="CRITICAL",
                                   status_message=(f"Re-raising error to stop further processing: {str(e)}"))
             raise # Use bare 'raise' to preserve the original exception with its traceback
 
xgae/engine/responser/responser_base.py CHANGED
@@ -7,8 +7,9 @@ from dataclasses import dataclass
 from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable, TypedDict, AsyncGenerator
 
 from xgae.engine.engine_base import XGAToolResult, XGAToolBox
-from xgae.utils import langfuse
-from xgae.utils.json_helpers import safe_json_parse,format_for_yield
+from xgae.utils.setup_env import langfuse
+
+from xgae.utils.json_helpers import safe_json_parse, format_for_yield
 from xgae.utils.xml_tool_parser import XMLToolParser
 
 # Type alias for XML result adding strategy
@@ -21,6 +22,7 @@ class TaskResponserContext(TypedDict, total=False):
     is_stream: bool
     task_id: str
     task_run_id: str
+    task_no: int
     trace_id: str
     root_span_id: str
     model_name: str
@@ -56,15 +58,12 @@ class TaskResponseProcessor(ABC):
 
         self.task_id = response_context.get("task_id")
         self.task_run_id = response_context.get("task_run_id")
+        self.task_no = response_context.get("task_no")
         self.tool_execution_strategy = self.response_context.get("tool_execution_strategy", "parallel")
         self.xml_adding_strategy = self.response_context.get("xml_adding_strategy", "user_message")
         self.max_xml_tool_calls = self.response_context.get("max_xml_tool_calls", 0)
 
-        self.trace_context = {
-            "trace_id": self.response_context.get("trace_id"),
-            "parent_span_id": self.response_context.get("root_span_id"),
-        }
-
+        self.root_span = langfuse.span(trace_id=response_context.get("trace_id"), id=response_context.get("root_span_id"))
        self.add_response_message = response_context.get("add_response_msg_func")
 
        self.tool_box = response_context.get("tool_box")
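Note on the hunk above: instead of passing a trace-context dict around, the processor now rebuilds a stateful span handle from the two ids it receives in the response context, and attaches all later events to that handle. A minimal sketch of the rehydration pattern, assuming the langfuse v2 client API (langfuse==2.60.9 is pinned in this release's METADATA); the trace/span names here are illustrative, not from the package:

    # Sketch only: re-create a span handle from (trace_id, span_id) and attach an event.
    from langfuse import Langfuse

    langfuse = Langfuse()  # reads LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY / LANGFUSE_HOST

    trace = langfuse.trace(name="demo_trace")               # illustrative names
    root = trace.span(name="demo_root", input={"q": "hi"})

    # Elsewhere, holding only the ids, the same span can be addressed again:
    span = langfuse.span(trace_id=trace.id, id=root.id)
    span.event(name="processor_start", level="DEFAULT", status_message="finish_reason=stop")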
@@ -174,7 +173,7 @@ class TaskResponseProcessor(ABC):
         except Exception as e:
             logging.error(f"Error extracting XML chunks: {e}")
             logging.error(f"Content was: {content}")
-            langfuse.create_event(trace_context=self.trace_context, name="error_extracting_xml_chunks", level="ERROR",
+            self.root_span.event(name="error_extracting_xml_chunks", level="ERROR",
                                   status_message=(f"Error extracting XML chunks: {e}"), metadata={"content": content})
 
         return chunks
@@ -224,7 +223,7 @@ class TaskResponseProcessor(ABC):
         except Exception as e:
             logging.error(f"Error parsing XML chunk: {e}")
             logging.error(f"XML chunk was: {xml_chunk}")
-            langfuse.create_event(trace_context=self.trace_context, name="error_parsing_xml_chunk", level="ERROR",
+            self.root_span.event(name="error_parsing_xml_chunk", level="ERROR",
                                   status_message=(f"Error parsing XML chunk: {e}"), metadata={"xml_chunk": xml_chunk})
             return None
 
@@ -250,7 +249,7 @@ class TaskResponseProcessor(ABC):
 
         except Exception as e:
             logging.error(f"Error parsing XML tool calls: {e}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_parsing_xml_tool_calls", level="ERROR",
+            self.root_span.event(name="error_parsing_xml_tool_calls", level="ERROR",
                                   status_message=(f"Error parsing XML tool calls: {e}"), metadata={"content": content})
 
         return parsed_data
@@ -258,36 +257,35 @@ class TaskResponseProcessor(ABC):
 
     async def _execute_tool(self, tool_call: Dict[str, Any]) -> XGAToolResult:
         """Execute a single tool call and return the result."""
-        with langfuse.start_as_current_span(trace_context=self.trace_context, name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"]
-        ) as exec_tool_span:
-            try:
-                function_name = tool_call["function_name"]
-                arguments = tool_call["arguments"]
-
-                logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
-
-                if isinstance(arguments, str):
-                    try:
-                        arguments = safe_json_parse(arguments)
-                    except json.JSONDecodeError:
-                        arguments = {"text": arguments} # @todo modify
-
-                result = None
-                available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
-                if function_name in available_tool_names:
-                    result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
-                else:
-                    logging.error(f"Tool function '{function_name}' not found in registry")
-                    result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
-                logging.info(f"Tool execution complete: {function_name} -> {result}")
-                exec_tool_span.update(status_message="tool_executed", output=result)
-
-                return result
-            except Exception as e:
-                logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
+        exec_tool_span = self.root_span.span(name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
+        try:
+            function_name = tool_call["function_name"]
+            arguments = tool_call["arguments"]
+
+            logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
+
+            if isinstance(arguments, str):
+                try:
+                    arguments = safe_json_parse(arguments)
+                except json.JSONDecodeError:
+                    arguments = {"text": arguments} # @todo modify
+
+            result = None
+            available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
+            if function_name in available_tool_names:
+                result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
+            else:
+                logging.error(f"Tool function '{function_name}' not found in registry")
+                result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
+            logging.info(f"Tool execution complete: {function_name} -> {result}")
+            exec_tool_span.update(status_message="tool_executed", output=result)
+
+            return result
+        except Exception as e:
+            logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
 
-                exec_tool_span.update(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
-                return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
+            exec_tool_span.update(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
+            return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
 
     async def _execute_tools(
             self,
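The rewrite above drops the `with langfuse.start_as_current_span(...)` context manager in favor of an explicit child span created from `self.root_span`, updated with the tool output on success or with level="ERROR" on failure. A condensed sketch of that lifecycle under the same v2 span API; `run_tool` is a stand-in for the real dispatch, not a package function:

    # Sketch: one child span per tool call, updated on success or failure.
    def run_tool(tool_name: str, args: dict) -> str:
        return f"{tool_name} ok"  # stand-in for the real tool dispatch

    def run_traced(root_span, tool_name: str, args: dict) -> str:
        exec_span = root_span.span(name=f"execute_tool.{tool_name}", input=args)
        try:
            result = run_tool(tool_name, args)
            exec_span.update(status_message="tool_executed", output=result)
            return result
        except Exception as e:
            # Mark the span itself as failed rather than emitting a separate event.
            exec_span.update(status_message="tool_execution_error",
                             output=f"Error executing tool: {e}", level="ERROR")
            raise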
@@ -321,7 +319,7 @@ class TaskResponseProcessor(ABC):
             return []
         tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
         logging.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
-        langfuse.create_event(trace_context=self.trace_context, name="executing_tools_sequentially", level="DEFAULT",
+        self.root_span.event(name="executing_tools_sequentially", level="DEFAULT",
                               status_message=(f"Executing {len(tool_calls)} tools sequentially: {tool_names}"))
 
         results = []
@@ -337,19 +335,19 @@ class TaskResponseProcessor(ABC):
                 # Check if this is a terminating tool (ask or complete)
                 if tool_name in ['ask', 'complete']:
                     logging.info(f"Terminating tool '{tool_name}' executed. Stopping further tool execution.")
-                    # langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_executed",
+                    # self.root_span.event(name="terminating_tool_executed",
                     #                       level="DEFAULT", status_message=(f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
                     break # Stop executing remaining tools
 
             except Exception as e:
                 logging.error(f"Error executing tool {tool_name}: {str(e)}")
-                langfuse.create_event(trace_context=self.trace_context, name="error_executing_tool", level="ERROR",
+                self.root_span.event(name="error_executing_tool", level="ERROR",
                                       status_message=(f"Error executing tool {tool_name}: {str(e)}"))
                 error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
                 results.append((tool_call, error_result))
 
         logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
-        # langfuse.create_event(trace_context=self.trace_context, name="sequential_execution_completed", level="DEFAULT",
+        # self.root_span.event(name="sequential_execution_completed", level="DEFAULT",
         #                       status_message=(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
         return results
 
@@ -361,7 +359,7 @@ class TaskResponseProcessor(ABC):
         try:
             tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
             logging.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
-            # langfuse.create_event(trace_context=self.trace_context, name="executing_tools_in_parallel", level="DEFAULT",
+            # self.root_span.event(name="executing_tools_in_parallel", level="DEFAULT",
            #                       status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
 
             # Create tasks for all tool calls
@@ -375,7 +373,7 @@ class TaskResponseProcessor(ABC):
             for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
                 if isinstance(result, Exception):
                     logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="error_executing_tool", level="ERROR", status_message=(
+                    self.root_span.event(name="error_executing_tool", level="ERROR", status_message=(
                         f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
                     # Create error result
                     error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
@@ -384,13 +382,13 @@ class TaskResponseProcessor(ABC):
                     processed_results.append((tool_call, result))
 
             logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
-            # langfuse.create_event(trace_context=self.trace_context, name="parallel_execution_completed", level="DEFAULT",
+            # self.root_span.event(name="parallel_execution_completed", level="DEFAULT",
            #                       status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
             return processed_results
 
         except Exception as e:
             logging.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_in_parallel_tool_execution", level="ERROR",
+            self.root_span.event(name="error_in_parallel_tool_execution", level="ERROR",
                                   status_message=(f"Error in parallel tool execution: {str(e)}"))
             # Return error results for all tools if the gather itself fails
             return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
@@ -461,7 +459,7 @@ class TaskResponseProcessor(ABC):
             return message_obj # Return the modified message object
         except Exception as e:
             logging.error(f"Error adding tool result: {str(e)}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_adding_tool_result", level="ERROR",
+            self.root_span.event(name="error_adding_tool_result", level="ERROR",
                                   status_message=(f"Error adding tool result: {str(e)}"),
                                   metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
                                             "assistant_message_id": assistant_message_id,
@@ -481,7 +479,7 @@ class TaskResponseProcessor(ABC):
                 return message_obj # Return the full message object
             except Exception as e2:
                 logging.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
-                langfuse.create_event(trace_context=self.trace_context, name="failed_even_with_fallback_message", level="ERROR",
+                self.root_span.event(name="failed_even_with_fallback_message", level="ERROR",
                                       status_message=(f"Failed even with fallback message: {str(e2)}"),
                                       metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
                                                 "assistant_message_id": assistant_message_id,
@@ -589,7 +587,7 @@ class TaskResponseProcessor(ABC):
         if context.function_name in ['ask', 'complete']:
             metadata["agent_should_terminate"] = "true"
             logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
-            # langfuse.create_event(trace_context=self.trace_context, name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
+            # self.root_span.event(name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
            #     f"Marking tool status for '{context.function_name}' with termination signal."))
             # <<< END ADDED >>>
 
xgae/engine/responser/stream_responser.py CHANGED
@@ -7,7 +7,6 @@ from dataclasses import dataclass
 from datetime import datetime, timezone
 from typing import List, Dict, Any, Optional, AsyncGenerator, override, Literal
 
-from xgae.utils import langfuse
 from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext,TaskRunContinuousState,XmlAddingStrategy,ToolExecutionStrategy
 from xgae.utils.json_helpers import (
     ensure_dict, safe_json_parse,
@@ -205,7 +204,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     __sequence += 1
                 else:
                     logging.info("XML tool call limit reached - not yielding more content chunks")
-                    langfuse.create_event(trace_context=self.trace_context, name="xml_tool_call_limit_reached", level="DEFAULT", status_message=(
+                    self.root_span.event(name="xml_tool_call_limit_reached", level="DEFAULT", status_message=(
                         f"XML tool call limit reached - not yielding more content chunks"))
 
                 # --- Process XML Tool Calls (if enabled and limit not reached) ---
@@ -317,7 +316,7 @@ class StreamTaskResponser(TaskResponseProcessor):
 
                 if finish_reason == "xml_tool_limit_reached":
                     logging.info("Stopping stream processing after loop due to XML tool call limit")
-                    langfuse.create_event(trace_context=self.trace_context, name="stopping_stream_processing_after_loop_due_to_xml_tool_call_limit",
+                    self.root_span.event(name="stopping_stream_processing_after_loop_due_to_xml_tool_call_limit",
                                           level="DEFAULT", status_message=(
                                               f"Stopping stream processing after loop due to XML tool call limit"))
                     break
@@ -352,18 +351,18 @@ class StreamTaskResponser(TaskResponseProcessor):
                 #     f"🔥 Estimated tokens – prompt: {prompt_tokens}, "
                 #     f"completion: {completion_tokens}, total: {prompt_tokens + completion_tokens}"
                 # )
-                langfuse.create_event(trace_context=self.trace_context, name="usage_calculated_with_litellm_token_counter", level="DEFAULT",
+                self.root_span.event(name="usage_calculated_with_litellm_token_counter", level="DEFAULT",
                                       status_message=(f"Usage calculated with litellm.token_counter"))
             except Exception as e:
                 logging.warning(f"Failed to calculate usage: {str(e)}")
-                langfuse.create_event(trace_context=self.trace_context, name="failed_to_calculate_usage", level="WARNING",
+                self.root_span.event(name="failed_to_calculate_usage", level="WARNING",
                                       status_message=(f"Failed to calculate usage: {str(e)}"))
 
             # Wait for pending tool executions from streaming phase
             tool_results_buffer = [] # Stores (tool_call, result, tool_index, context)
             if pending_tool_executions:
                 logging.info(f"Waiting for {len(pending_tool_executions)} pending streamed tool executions")
-                langfuse.create_event(trace_context=self.trace_context, name="waiting_for_pending_streamed_tool_executions", level="DEFAULT", status_message=(
+                self.root_span.event(name="waiting_for_pending_streamed_tool_executions", level="DEFAULT", status_message=(
                     f"Waiting for {len(pending_tool_executions)} pending streamed tool executions"))
                 # ... (asyncio.wait logic) ...
                 pending_tasks = [execution["task"] for execution in pending_tool_executions]
@@ -387,19 +386,19 @@ class StreamTaskResponser(TaskResponseProcessor):
                             if tool_name in ['ask', 'complete']:
                                 logging.info(
                                     f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag.")
-                                langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_completed_during_streaming",
+                                self.root_span.event(name="terminating_tool_completed_during_streaming",
                                                       level="DEFAULT", status_message=(
                                                           f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag."))
                                 agent_should_terminate = True
 
                         else: # Should not happen with asyncio.wait
                             logging.warning(f"Task for tool index {tool_idx} not done after wait.")
-                            langfuse.create_event(trace_context=self.trace_context, name="task_for_tool_index_not_done_after_wait", level="WARNING",
+                            self.root_span.event(name="task_for_tool_index_not_done_after_wait", level="WARNING",
                                                   status_message=(
                                                       f"Task for tool index {tool_idx} not done after wait."))
                     except Exception as e:
                         logging.error(f"Error getting result for pending tool execution {tool_idx}: {str(e)}")
-                        langfuse.create_event(trace_context=self.trace_context, name="error_getting_result_for_pending_tool_execution", level="ERROR",
+                        self.root_span.event(name="error_getting_result_for_pending_tool_execution", level="ERROR",
                                               status_message=(
                                                   f"Error getting result for pending tool execution {tool_idx}: {str(e)}"))
                         context.error = e
@@ -419,7 +418,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                         if tool_name in ['ask', 'complete']:
                             logging.info(
                                 f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag.")
-                            langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_completed_during_streaming", level="DEFAULT",
+                            self.root_span.event(name="terminating_tool_completed_during_streaming", level="DEFAULT",
                                                   status_message=(
                                                       f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag."))
                             agent_should_terminate = True
@@ -432,7 +431,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     except Exception as e:
                         logging.error(
                             f"Error getting result/yielding status for pending tool execution {tool_idx}: {str(e)}")
-                        langfuse.create_event(trace_context=self.trace_context, name="error_getting_result_yielding_status_for_pending_tool_execution",
+                        self.root_span.event(name="error_getting_result_yielding_status_for_pending_tool_execution",
                                               level="ERROR", status_message=(
                                                   f"Error getting result/yielding status for pending tool execution {tool_idx}: {str(e)}"))
                         context.error = e
@@ -451,7 +450,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     if finish_msg_obj: yield format_for_yield(finish_msg_obj)
                     logging.info(
                         f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls")
-                    langfuse.create_event(trace_context=self.trace_context, name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls",
+                    self.root_span.event(name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls",
                                           level="DEFAULT", status_message=(
                                               f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls"))
 
@@ -501,7 +500,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     yield format_for_yield(yield_message)
                 else:
                     logging.error(f"Failed to save final assistant message for thread {thread_id}")
-                    langfuse.create_event(trace_context=self.trace_context, name="failed_to_save_final_assistant_message_for_thread", level="ERROR",
+                    self.root_span.event(name="failed_to_save_final_assistant_message_for_thread", level="ERROR",
                                           status_message=(f"Failed to save final assistant message for thread {thread_id}"))
                     # Save and yield an error status
                     err_content = {"role": "system", "status_type": "error",
@@ -569,7 +568,7 @@ class StreamTaskResponser(TaskResponseProcessor):
             # Populate from buffer if executed on stream
             if config.execute_on_stream and tool_results_buffer:
                 logging.info(f"Processing {len(tool_results_buffer)} buffered tool results")
-                langfuse.create_event(trace_context=self.trace_context, name="processing_buffered_tool_results", level="DEFAULT",
+                self.root_span.event(name="processing_buffered_tool_results", level="DEFAULT",
                                       status_message=(f"Processing {len(tool_results_buffer)} buffered tool results"))
                 for tool_call, result, tool_idx, context in tool_results_buffer:
                     if last_assistant_message_object: context.assistant_message_id = last_assistant_message_object[
@@ -580,7 +579,7 @@ class StreamTaskResponser(TaskResponseProcessor):
             elif final_tool_calls_to_process and not config.execute_on_stream:
                 logging.info(
                     f"Executing {len(final_tool_calls_to_process)} tools ({config.tool_execution_strategy}) after stream")
-                langfuse.create_event(trace_context=self.trace_context, name="executing_tools_after_stream", level="DEFAULT", status_message=(
+                self.root_span.event(name="executing_tools_after_stream", level="DEFAULT", status_message=(
                     f"Executing {len(final_tool_calls_to_process)} tools ({config.tool_execution_strategy}) after stream"))
                 results_list = await self._execute_tools(final_tool_calls_to_process,
                                                          config.tool_execution_strategy)
@@ -598,14 +597,14 @@ class StreamTaskResponser(TaskResponseProcessor):
                         tool_results_map[current_tool_idx] = (tc, res, context)
                     else:
                         logging.warning(f"Could not map result for tool index {current_tool_idx}")
-                        langfuse.create_event(trace_context=self.trace_context, name="could_not_map_result_for_tool_index", level="WARNING",
+                        self.root_span.event(name="could_not_map_result_for_tool_index", level="WARNING",
                                               status_message=(f"Could not map result for tool index {current_tool_idx}"))
                     current_tool_idx += 1
 
             # Save and Yield each result message
             if tool_results_map:
                 logging.info(f"Saving and yielding {len(tool_results_map)} final tool result messages")
-                langfuse.create_event(trace_context=self.trace_context, name="saving_and_yielding_final_tool_result_messages", level="DEFAULT",
+                self.root_span.event(name="saving_and_yielding_final_tool_result_messages", level="DEFAULT",
                                       status_message=(
                                           f"Saving and yielding {len(tool_results_map)} final tool result messages"))
                 for tool_idx in sorted(tool_results_map.keys()):
@@ -640,7 +639,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     else:
                         logging.error(
                             f"Failed to save tool result for index {tool_idx}, not yielding result message.")
-                        langfuse.create_event(trace_context=self.trace_context, name="failed_to_save_tool_result_for_index", level="ERROR",
+                        self.root_span.event(name="failed_to_save_tool_result_for_index", level="ERROR",
                                               status_message=(
                                                   f"Failed to save tool result for index {tool_idx}, not yielding result message."))
                         # Optionally yield error status for saving failure?
@@ -658,7 +657,7 @@ class StreamTaskResponser(TaskResponseProcessor):
             if agent_should_terminate:
                 logging.info(
                     "Agent termination requested after executing ask/complete tool. Stopping further processing.")
-                langfuse.create_event(trace_context=self.trace_context, name="agent_termination_requested", level="DEFAULT",
+                self.root_span.event(name="agent_termination_requested", level="DEFAULT",
                                       status_message="Agent termination requested after executing ask/complete tool. Stopping further processing.")
 
                 # Set finish reason to indicate termination
@@ -719,7 +718,7 @@ class StreamTaskResponser(TaskResponseProcessor):
                     logging.info("Assistant response end saved for stream (before termination)")
                 except Exception as e:
                     logging.error(f"Error saving assistant response end for stream (before termination): {str(e)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="error_saving_assistant_response_end_for_stream_before_termination",
+                    self.root_span.event(name="error_saving_assistant_response_end_for_stream_before_termination",
                                           level="ERROR", status_message=(
                                               f"Error saving assistant response end for stream (before termination): {str(e)}"))
 
@@ -775,12 +774,12 @@ class StreamTaskResponser(TaskResponseProcessor):
                     logging.info("Assistant response end saved for stream")
                 except Exception as e:
                     logging.error(f"Error saving assistant response end for stream: {str(e)}")
-                    langfuse.create_event(trace_context=self.trace_context, name="error_saving_assistant_response_end_for_stream", level="ERROR",
+                    self.root_span.event(name="error_saving_assistant_response_end_for_stream", level="ERROR",
                                           status_message=(f"Error saving assistant response end for stream: {str(e)}"))
 
         except Exception as e:
             logging.error(f"Error processing stream: {str(e)}", exc_info=True)
-            langfuse.create_event(trace_context=self.trace_context, name="error_processing_stream", level="ERROR",
+            self.root_span.event(name="error_processing_stream", level="ERROR",
                                   status_message=(f"Error processing stream: {str(e)}"))
             # Save and yield error status message
 
@@ -794,12 +793,12 @@ class StreamTaskResponser(TaskResponseProcessor):
                 if err_msg_obj: yield format_for_yield(err_msg_obj) # Yield the saved error message
                 # Re-raise the same exception (not a new one) to ensure proper error propagation
                 logging.critical(f"Re-raising error to stop further processing: {str(e)}")
-                langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="ERROR",
+                self.root_span.event(name="re_raising_error_to_stop_further_processing", level="ERROR",
                                       status_message=(f"Re-raising error to stop further processing: {str(e)}"))
             else:
                 logging.error(f"AnthropicException - Overloaded detected - Falling back to OpenRouter: {str(e)}",
                               exc_info=True)
-                langfuse.create_event(trace_context=self.trace_context, name="anthropic_exception_overloaded_detected", level="ERROR", status_message=(
+                self.root_span.event(name="anthropic_exception_overloaded_detected", level="ERROR", status_message=(
                     f"AnthropicException - Overloaded detected - Falling back to OpenRouter: {str(e)}"))
             raise # Use bare 'raise' to preserve the original exception with its traceback
 
@@ -822,5 +821,5 @@ class StreamTaskResponser(TaskResponseProcessor):
                 if end_msg_obj: yield format_for_yield(end_msg_obj)
             except Exception as final_e:
                 logging.error(f"Error in finally block: {str(final_e)}", exc_info=True)
-                langfuse.create_event(trace_context=self.trace_context, name="error_in_finally_block", level="ERROR",
+                self.root_span.event(name="error_in_finally_block", level="ERROR",
                                       status_message=(f"Error in finally block: {str(final_e)}"))
xgae/engine/task_engine.py CHANGED
@@ -9,13 +9,16 @@ from uuid import uuid4
 from xgae.engine.responser.responser_base import TaskResponserContext, TaskResponseProcessor, TaskRunContinuousState
 from xgae.engine.engine_base import XGAResponseMsgType, XGAResponseMessage, XGAToolBox, XGATaskResult
 
-from xgae.utils import langfuse, handle_error
-from xgae.utils.llm_client import LLMClient, LLMConfig
+from xgae.utils import handle_error
+from xgae.utils.setup_env import langfuse
+
+from xgae.utils.llm_client import LLMClient, LLMConfig, LangfuseMetadata
 
 from xgae.utils.json_helpers import format_for_yield
 from xgae.engine.prompt_builder import XGAPromptBuilder
 from xgae.engine.mcp_tool_box import XGAMcpToolBox
 
+
 class XGATaskEngine:
     def __init__(self,
                  session_id: Optional[str] = None,
@@ -54,18 +57,14 @@ class XGATaskEngine:
         self.task_prompt :str = None
         self.trace_id :str = None
         self.root_span_id :str = None
+        self.root_span_name :str = None
 
     async def run_task_with_final_answer(self,
                                          task_message: Dict[str, Any],
                                          trace_id: Optional[str] = None) -> XGATaskResult:
-        self.trace_id = trace_id or langfuse.create_trace_id()
-        with langfuse.start_as_current_span(trace_context={"trace_id": self.trace_id},
-                                            name="run_task_with_final_answer",
-                                            input=task_message,
-                                            metadata={"task_id": self.task_id},
-                                            ) as root_span:
-            self.root_span_id = root_span.id
-
+        final_result:XGATaskResult = None
+        try:
+            self._init_langfuse("run_task_with_final_answer", task_message, trace_id)
             chunks = []
             async for chunk in self.run_task(task_message=task_message, trace_id=trace_id):
                 chunks.append(chunk)
@@ -75,8 +74,9 @@ class XGATaskEngine:
             else:
                 final_result = XGATaskResult(type="error", content="LLM Answer is Empty")
 
-            root_span.update(output=final_result)
             return final_result
+        finally:
+            self._end_langfuse("run_task_with_final_answer", final_result)
 
 
     async def run_task(self,
@@ -84,27 +84,15 @@ class XGATaskEngine:
                        trace_id: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
         try:
             await self._init_task()
-            if self.root_span_id is None:
-                self.trace_id = trace_id or langfuse.create_trace_id()
-                with langfuse.start_as_current_span(trace_context={"trace_id": self.trace_id},
-                                                    name="run_task",
-                                                    input=task_message
-                                                    ) as root_span:
-                    self.root_span_id = root_span.id
-
+            self._init_langfuse("run_task", task_message, trace_id)
 
             self.add_response_message(type="user", content=task_message, is_llm_message=True)
 
-            continuous_state: TaskRunContinuousState = {
-                "accumulated_content": "",
-                "auto_continue_count": 0,
-                "auto_continue": False if self.max_auto_run <= 1 else True
-            }
-            async for chunk in self._run_task_auto(continuous_state):
+            async for chunk in self._run_task_auto():
                 yield chunk
         finally:
             await self.tool_box.destroy_task_tool_box(self.task_id)
-            self.root_span_id = None
+            self._end_langfuse("run_task")
 
 
     async def _init_task(self) -> None:
@@ -133,11 +121,17 @@ class XGATaskEngine:
         logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
 
 
-    async def _run_task_auto(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
+    async def _run_task_auto(self) -> AsyncGenerator[Dict[str, Any], None]:
         def update_continuous_state(_auto_continue_count, _auto_continue):
             continuous_state["auto_continue_count"] = _auto_continue_count
             continuous_state["auto_continue"] = _auto_continue
 
+        continuous_state: TaskRunContinuousState = {
+            "accumulated_content": "",
+            "auto_continue_count": 0,
+            "auto_continue": False if self.max_auto_run <= 1 else True
+        }
+
         auto_continue_count = 0
         auto_continue = True
         while auto_continue and auto_continue_count < self.max_auto_run:
@@ -196,14 +190,15 @@ class XGATaskEngine:
 
             }
             llm_messages.append(temp_assistant_message)
-            llm_response = await self.llm_client.create_completion(llm_messages, self.trace_id)
+            llm_count = continuous_state.get("auto_continue_count")
+            langfuse_metadata = self._create_llm_langfuse_meta(llm_count)
+            llm_response = await self.llm_client.create_completion(llm_messages, langfuse_metadata)
             response_processor = self._create_response_processer()
 
             async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
                 self._logging_reponse_chunk(chunk)
                 yield chunk
 
-
     def _parse_final_result(self, chunks: List[Dict[str, Any]]) -> XGATaskResult:
         final_result: XGATaskResult = None
         try:
@@ -301,6 +296,40 @@ class XGATaskEngine:
         return response_llm_contents
 
 
+    def _create_llm_langfuse_meta(self, llm_count:int)-> LangfuseMetadata:
+        generation_name = f"xga_task_engine_llm_completion[{self.task_no}]({llm_count})"
+        generation_id = f"{self.task_run_id}({llm_count})"
+        return LangfuseMetadata(
+            generation_name=generation_name,
+            generation_id=generation_id,
+            existing_trace_id=self.trace_id,
+            session_id=self.session_id,
+        )
+
+    def _init_langfuse(self,
+                       root_span_name: str,
+                       task_message: Dict[str, Any],
+                       trace_id: Optional[str] = None):
+
+        if self.root_span_id is None:
+            trace = None
+            if trace_id:
+                self.trace_id = trace_id
+                trace = langfuse.trace(id=trace_id)
+            else:
+                trace = langfuse.trace(name="xga_task_engine")
+                self.trace_id = trace.id
+
+            span = trace.span(name=root_span_name, input=task_message,metadata={"task_id": self.task_id})
+            self.root_span_id = span.id
+            self.root_span_name = root_span_name
+
+    def _end_langfuse(self, root_span_name:str, output: Optional[XGATaskResult]=None):
+        if self.root_span_id and self.root_span_name == root_span_name:
+            langfuse.span(trace_id=self.trace_id, id=self.root_span_id).end(output=output)
+            self.root_span_id = None
+            self.root_span_name = None
+
     def _create_response_processer(self) -> TaskResponseProcessor:
         response_context = self._create_response_context()
         is_stream = response_context.get("is_stream", False)
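`_init_langfuse` and `_end_langfuse` replace the old `with ... as root_span:` blocks so the root span can outlive a single statement: whichever entry point opens the span records its name, and only a matching `_end_langfuse` call closes it. Since `run_task_with_final_answer` drives `run_task`, the inner `_end_langfuse("run_task")` is a no-op when the outer method owns the span. A compressed sketch of that open-by-name/close-by-name pattern, with illustrative class and method names:

    # Sketch of the span-ownership pattern above (names are illustrative).
    from typing import Optional
    from langfuse import Langfuse

    langfuse = Langfuse()

    class SpanOwner:
        def __init__(self):
            self.trace_id: Optional[str] = None
            self.span_id: Optional[str] = None
            self.span_name: Optional[str] = None

        def open(self, name: str, payload) -> None:
            if self.span_id is None:  # nested calls reuse the outer span
                trace = langfuse.trace(name="xga_task_engine")
                span = trace.span(name=name, input=payload)
                self.trace_id, self.span_id, self.span_name = trace.id, span.id, name

        def close(self, name: str, output=None) -> None:
            if self.span_id and self.span_name == name:  # only the opener closes
                langfuse.span(trace_id=self.trace_id, id=self.span_id).end(output=output)
                self.span_id = self.span_name = None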
@@ -316,6 +345,7 @@ class XGATaskEngine:
             "is_stream": self.is_stream,
             "task_id": self.task_id,
             "task_run_id": self.task_run_id,
+            "task_no": self.task_no,
             "trace_id": self.trace_id,
             "root_span_id": self.root_span_id,
             "model_name": self.model_name,
xgae/utils/__init__.py CHANGED
@@ -1,13 +1,9 @@
 import logging
 
-from .setup_env import setup_langfuse, setup_logging
-
-setup_logging()
-langfuse = setup_langfuse()
-
 def handle_error(e: Exception) -> None:
     import traceback
 
     logging.error("An error occurred: %s", str(e))
     logging.error("Traceback details:\n%s", traceback.format_exc())
     raise (e) from e
+
xgae/utils/llm_client.py CHANGED
@@ -9,19 +9,29 @@ from typing import Union, Dict, Any, Optional, List, TypedDict
 from litellm.utils import ModelResponse, CustomStreamWrapper
 from openai import OpenAIError
 
+from xgae.utils.setup_env import langfuse
+
 class LLMConfig(TypedDict, total=False):
-    model: str
-    model_name: str
-    model_id: str
-    api_key: str
-    api_base: str
-    temperature: float
-    max_tokens: int
-    stream: bool
-    enable_thinking: bool
-    reasoning_effort: str
-    response_format: str
-    top_p: int
+    model: str                # Optional Name of the model to use , Override .env LLM_MODEL
+    model_name: str           # Optional Name of the model to use , use model if empty
+    model_id: str             # Optional ARN for Bedrock inference profiles, default is None
+    api_key: str              # Optional API key, Override .env LLM_API_KEY or OS env variable
+    api_base: str             # Optional API base URL, Override .env LLM_API_BASE
+    temperature: float        # temperature: Optional Sampling temperature (0-1), Override .env LLM_TEMPERATURE
+    max_tokens: int           # max_tokens: Optional Maximum tokens in the response, Override .env LLM_MAX_TOKENS
+    stream: bool              # stream: Optional whether to stream the response, default is True
+    enable_thinking: bool     # Optional whether to enable thinking, default is False
+    reasoning_effort: str     # Optional level of reasoning effort, default is ‘low’
+    response_format: str      # response_format: Optional desired format for the response, default is None
+    top_p: int                # Optional Top-p sampling parameter, default is None
+
+
+class LangfuseMetadata(TypedDict, total=False):
+    generation_name: str
+    generation_id: str
+    existing_trace_id: str
+    session_id: str
+
 
 class LLMError(Exception):
     """Base exception for LLM-related errors."""
@@ -31,27 +41,15 @@ class LLMClient:
     RATE_LIMIT_DELAY = 30
     RETRY_DELAY = 0.1
 
-    def __init__(self, llm_config: LLMConfig=None) -> None:
-        """
-        Arg: llm_config (Optional[Dict[str, Any]], optional)
-            model: Override default model to use, default set by .env LLM_MODEL
-            model_name: Optional Name of the model to use , use model if empty
-            model_id: Optional ARN for Bedrock inference profiles, default is None
-            api_key: Optional API key, Override .env LLM_API_KEY or OS environment variable
-            api_base: Optional API base URL, Override .env LLM_API_BASE
-            temperature: Optional Sampling temperature (0-1), Override .env LLM_TEMPERATURE
-            max_tokens: Optional Maximum tokens in the response, Override .env LLM_MAX_TOKENS
-            stream: Optional whether to stream the response, default is True
-            response_format: Optional desired format for the response, default is None
-            enable_thinking: Optional whether to enable thinking, default is False
-            reasoning_effort: Optional level of reasoning effort, default is ‘low’
-            top_p: Optional Top-p sampling parameter, default is None
-        """
+    langfuse_inited = False
+    langfuse_enabled = False
 
-        llm_config = llm_config or LLMConfig()
+    def __init__(self, llm_config: LLMConfig=None):
         litellm.modify_params = True
         litellm.drop_params = True
+        self._init_langfuse()
 
+        llm_config = llm_config or LLMConfig()
         self.max_retries = int(os.getenv("LLM_MAX_RETRIES", 1))
 
         env_llm_model = os.getenv("LLM_MODEL", "openai/qwen3-235b-a22b")
@@ -83,6 +81,20 @@ class LLMClient:
         self.lite_llm_params = self._prepare_llm_params(llm_config_params)
         logging.info(f"📡 LLMClient initialed : model={self.model_name}, is_stream={self.is_stream}, enable thinking={self.lite_llm_params['enable_thinking']}")
 
+    @staticmethod
+    def _init_langfuse():
+        if not LLMClient.langfuse_inited:
+            LLMClient.langfuse_inited =True
+            if langfuse and langfuse.enabled:
+                litellm.success_callback = ["langfuse"]
+                litellm.failure_callback = ["langfuse"]
+                LLMClient.langfuse_enabled = True
+                logging.info("=== LiteLLM Langfuse is enable !")
+            else:
+                LLMClient.langfuse_enabled = False
+                logging.warning("*** LiteLLM Langfuse is disable !")
+
+
 
     def _prepare_llm_params(self, llm_config_params: Dict[str, Any]) -> Dict[str, Any]:
         prepared_llm_params = llm_config_params.copy()
@@ -206,10 +218,10 @@ class LLMClient:
             logging.debug(f"LLMClient: Waiting {delay} seconds before retry llm completion...")
             await asyncio.sleep(delay)
 
-    async def create_completion(self, messages: List[Dict[str, Any]], trace_id: Optional[str]=None) -> Union[ModelResponse, CustomStreamWrapper]:
+    async def create_completion(self, messages: List[Dict[str, Any]], langfuse_metadata: Optional[LangfuseMetadata]=None) -> Union[ModelResponse, CustomStreamWrapper]:
         complete_params = self._prepare_complete_params(messages)
-        if trace_id:
-            complete_params["litellm_trace_id"] = trace_id
+        if LLMClient.langfuse_enabled and langfuse_metadata:
+            complete_params["metadata"] = langfuse_metadata
 
         last_error = None
         for attempt in range(self.max_retries):
@@ -228,12 +240,11 @@ class LLMClient:
         raise LLMError(f"LLM completion failed after {self.max_retries} attempts !")
 
 if __name__ == "__main__":
-    from xgae.utils import langfuse
-
     async def llm_completion():
         llm_client = LLMClient(LLMConfig(stream=False))
-        messages = [{"role": "user", "content": "今天是2025年8月15日,北京本周每天温度"}]
-        trace_id = langfuse.create_trace_id()
+        messages = [{"role": "user", "content": "1+1="}]
+        trace_id = langfuse.trace(name = "xgae_litellm_test").trace_id
+
         response = await llm_client.create_completion(messages, trace_id)
         if llm_client.is_stream:
             async for chunk in response:
@@ -247,6 +258,7 @@ if __name__ == "__main__":
         else:
             print(response.choices[0].message.content)
 
+
     asyncio.run(llm_completion())
 
 
xgae/utils/setup_env.py CHANGED
@@ -1,22 +1,27 @@
 import logging
 import os
 
+from dotenv import load_dotenv
 from langfuse import Langfuse
 
-def setup_logging() -> None:
+load_dotenv()
+
+def setup_logging() :
+    log_enable = bool(os.getenv("LOG_ENABLE", True))
+    if not log_enable :
+        return
+
     import colorlog
-    from dotenv import load_dotenv
-    load_dotenv()
 
     env_log_level = os.getenv("LOG_LEVEL", "INFO")
-    env_log_file = os.getenv("LOG_FILE", "log/xga.log")
+    log_file = os.getenv("LOG_FILE", "log/xga.log")
     log_level = getattr(logging, env_log_level.upper(), logging.INFO)
 
-    log_dir = os.path.dirname(env_log_file)
+    log_dir = os.path.dirname(log_file)
     if log_dir and not os.path.exists(log_dir):
         os.makedirs(log_dir, exist_ok=True)
     else:
-        os.remove(env_log_file)
+        os.remove(log_file)
 
     logger = logging.getLogger()
     for handler in logger.handlers[:]:
@@ -43,7 +48,7 @@ def setup_logging() -> None:
     console_handler = logging.StreamHandler()
     console_handler.setFormatter(console_formatter)
 
-    file_handler = logging.FileHandler(env_log_file, encoding='utf-8')
+    file_handler = logging.FileHandler(log_file, encoding='utf-8')
     file_handler.setFormatter(file_formatter)
 
     logger.addHandler(console_handler)
@@ -51,30 +56,25 @@
 
     logger.setLevel(log_level)
 
-    logging.info(f"Logger is initialized, log_level={env_log_level}, log_file={env_log_file}")
+    logging.info(f"📡 XGAE_LOGGING is initialized, log_level={env_log_level}, log_file={log_file}")
 
+setup_logging()
 
 def setup_langfuse() -> Langfuse:
-    _langfuse = None
     env_public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
     env_secret_key = os.getenv("LANGFUSE_SECRET_KEY")
     env_host = os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
     if env_public_key and env_secret_key:
-        _langfuse = Langfuse(tracing_enabled=True,
+        _langfuse = Langfuse(enabled=True,
                              public_key=env_public_key,
                              secret_key=env_secret_key,
                              host=env_host)
 
-        logging.info("Langfuse initialized Successfully by Key !")
+        logging.info("📡 XGAE_LANGFUSE initialized Successfully by Key !")
     else:
-        _langfuse = Langfuse(tracing_enabled=False)
-        logging.warning("Not set key, Langfuse is disabled!")
+        _langfuse = Langfuse(enabled=False)
+        logging.warning("📡 XGAE_LANGFUSE Not set key, Langfuse is disabled!")
 
     return _langfuse
 
-
-if __name__ == "__main__":
-    from xgae.utils import langfuse
-
-    trace_id = langfuse.create_trace_id()
-    logging.warning(f"trace_id={trace_id}")
+langfuse = setup_langfuse()
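With this change, setup_env is driven entirely by environment variables loaded via `load_dotenv()` at import time, and the module-level `langfuse` client is created on first import. A small sketch of priming those variables before importing the module (all values below are placeholders):

    # Sketch: environment consumed by xgae.utils.setup_env (placeholder values).
    import os

    os.environ.setdefault("LOG_ENABLE", "true")   # any non-empty string is truthy here
    os.environ.setdefault("LOG_LEVEL", "INFO")
    os.environ.setdefault("LOG_FILE", "log/xga.log")
    os.environ.setdefault("LANGFUSE_PUBLIC_KEY", "pk-lf-placeholder")
    os.environ.setdefault("LANGFUSE_SECRET_KEY", "sk-lf-placeholder")
    os.environ.setdefault("LANGFUSE_HOST", "https://cloud.langfuse.com")

    from xgae.utils.setup_env import langfuse  # import runs setup_logging() and setup_langfuse()
    print(langfuse.enabled)

One caveat worth noting: `bool(os.getenv("LOG_ENABLE", True))` is True for any non-empty string, including "false", so logging is only skipped when LOG_ENABLE is set to an empty string.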
xgae-0.1.8.dist-info/METADATA ADDED
@@ -0,0 +1,11 @@
+Metadata-Version: 2.4
+Name: xgae
+Version: 0.1.8
+Summary: Extreme General Agent Engine
+Requires-Python: >=3.13
+Requires-Dist: colorlog==6.9.0
+Requires-Dist: langchain-mcp-adapters==0.1.9
+Requires-Dist: langfuse==2.60.9
+Requires-Dist: langgraph==0.6.5
+Requires-Dist: litellm==1.74.15
+Requires-Dist: mcp==1.13.0
xgae-0.1.8.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
+xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+xgae/engine/engine_base.py,sha256=MExJxXHkufv_a-n9tm3FZAbJyxVAwEkRC3C_WAGdu4Q,1746
+xgae/engine/mcp_tool_box.py,sha256=6bsORGK2HE-NS8vWNnbbgUQ9uGr_MI7JGO9j_FYKUKA,9903
+xgae/engine/prompt_builder.py,sha256=8_rNRJksn2QLV_K98S0x0qNeHcmxhU0kB_53IZJTGOU,4366
+xgae/engine/task_engine.py,sha256=kOntdzVtTjjakIhcBNK5vBMiBuAXPqB4R5ustY-BNfU,18931
+xgae/engine/responser/non_stream_responser.py,sha256=tXvkGHFGm2oO0IE8Bz9DFDwqemBkO92Nb6MbX1ukHeE,6352
+xgae/engine/responser/responser_base.py,sha256=FdR0yX7VfU5kW-4JmTXBfnn9AwyX1EbOAnslkC7Vcyg,29874
+xgae/engine/responser/stream_responser.py,sha256=oPGtrT1nedGMjiBAwPzUlu6Z_rPWeVSODC1xQ6D8cTY,52055
+xgae/tools/without_general_tools_app.py,sha256=QknIF4OW9xvOad8gx-F_sCBwQYXqMalnNFvYvZXkQ_I,3789
+xgae/utils/__init__.py,sha256=GPubcIs2XFPiWKnuCpevAtYEmVWKJuXlmGkmsH9qoXA,219
+xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
+xgae/utils/llm_client.py,sha256=Y-o26VW1MOhJYsWJ0zR4u_YXsHSEbvVPY6r90zLQJXU,13213
+xgae/utils/misc.py,sha256=EK94YesZp8AmRUqWfN-CjTxyEHPWdIIWpFNO17dzm9g,915
+xgae/utils/setup_env.py,sha256=P_p74q3nroBdTkAElfGr4QLm4fu7ZRP0R9BMGNUL010,2352
+xgae/utils/xml_tool_parser.py,sha256=EJ6BjpD4CSdmS_LqViUJ6P8H9GY2R1e4Dh8rLCR6nSE,7474
+xgae-0.1.8.dist-info/METADATA,sha256=mk0D1208wUJnbIZxyteCAjE2tlKtQixumHo0mcqD3RI,309
+xgae-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+xgae-0.1.8.dist-info/entry_points.txt,sha256=rhQ9Vksnu8nA78lPTjiJxOCZ5k6sH6s5YNMR68y7C-A,73
+xgae-0.1.8.dist-info/RECORD,,
xgae-0.1.7.dist-info/METADATA DELETED
@@ -1,11 +0,0 @@
-Metadata-Version: 2.4
-Name: xgae
-Version: 0.1.7
-Summary: Extreme General Agent Engine
-Requires-Python: >=3.13
-Requires-Dist: colorlog>=6.9.0
-Requires-Dist: langchain-mcp-adapters>=0.1.4
-Requires-Dist: langfuse>=2.60.5
-Requires-Dist: langgraph>=0.3.21
-Requires-Dist: litellm>=1.74.8
-Requires-Dist: mcp>=1.12.1
xgae-0.1.7.dist-info/RECORD DELETED
@@ -1,19 +0,0 @@
-xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-xgae/engine/engine_base.py,sha256=ySERuLy1YWsf-3s0NFKcyTnXQ4g69wR-cQhtnG0OFmU,1747
-xgae/engine/mcp_tool_box.py,sha256=iBfCLWXvbkKWd1JnSgBLA1T1KdSdpEZCrROkC_cXp7g,10132
-xgae/engine/prompt_builder.py,sha256=8_rNRJksn2QLV_K98S0x0qNeHcmxhU0kB_53IZJTGOU,4366
-xgae/engine/task_engine.py,sha256=mjS7oDlJH-1wBohzz1FfFng2nE_K0xL-QaNZDuQss3A,17986
-xgae/engine/responser/non_stream_responser.py,sha256=kOP9kDEhHtrDKMyVnBPWEx0yO-xTvVacfXGJJqFtONU,6395
-xgae/engine/responser/responser_base.py,sha256=rn3-LkS_alZUwdIxPfDGC_zAH_CncSQ2I-euYA6a45w,30524
-xgae/engine/responser/stream_responser.py,sha256=5KzCHApiPplZ-zN_sbbEbSvj2rtvKWBshJKe_-x7RDI,52927
-xgae/tools/without_general_tools_app.py,sha256=QknIF4OW9xvOad8gx-F_sCBwQYXqMalnNFvYvZXkQ_I,3789
-xgae/utils/__init__.py,sha256=jChvD-p_p5gsrCZUVYPUGJs4CS9gIdNFcSOpkRpcM4Y,317
-xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
-xgae/utils/llm_client.py,sha256=RvID4bL9yZon096uvuoFZPlqAPiHhET9-9qYp6sUERc,12605
-xgae/utils/misc.py,sha256=EK94YesZp8AmRUqWfN-CjTxyEHPWdIIWpFNO17dzm9g,915
-xgae/utils/setup_env.py,sha256=EVk0KG92Sk6ejBxXZbDDr_dc3KM8GFMofMA4HvXqSfM,2409
-xgae/utils/xml_tool_parser.py,sha256=EJ6BjpD4CSdmS_LqViUJ6P8H9GY2R1e4Dh8rLCR6nSE,7474
-xgae-0.1.7.dist-info/METADATA,sha256=AbGuJUOv4574WF9a2nQNaTxuOKEN2K_U7RaACci2PME,309
-xgae-0.1.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-xgae-0.1.7.dist-info/entry_points.txt,sha256=rhQ9Vksnu8nA78lPTjiJxOCZ5k6sH6s5YNMR68y7C-A,73
-xgae-0.1.7.dist-info/RECORD,,
File without changes