xgae 0.1.7__tar.gz → 0.1.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xgae might be problematic; see the registry's advisory page for more details.

Files changed (44)
  1. {xgae-0.1.7 → xgae-0.1.8}/.env +1 -0
  2. {xgae-0.1.7 → xgae-0.1.8}/.idea/workspace.xml +12 -4
  3. xgae-0.1.8/PKG-INFO +11 -0
  4. {xgae-0.1.7 → xgae-0.1.8}/pyproject.toml +7 -7
  5. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/engine/engine_base.py +1 -1
  6. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/engine/mcp_tool_box.py +2 -6
  7. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/engine/responser/non_stream_responser.py +4 -4
  8. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/engine/responser/responser_base.py +48 -50
  9. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/engine/responser/stream_responser.py +24 -25
  10. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/engine/task_engine.py +59 -29
  11. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/utils/__init__.py +1 -5
  12. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/utils/llm_client.py +48 -36
  13. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/utils/setup_env.py +19 -19
  14. xgae-0.1.8/test/test_langfuse.py +28 -0
  15. xgae-0.1.8/test/test_litellm_langfuse.py +58 -0
  16. {xgae-0.1.7 → xgae-0.1.8}/uv.lock +18 -127
  17. xgae-0.1.7/PKG-INFO +0 -11
  18. {xgae-0.1.7 → xgae-0.1.8}/.idea/.gitignore +0 -0
  19. {xgae-0.1.7 → xgae-0.1.8}/.idea/inspectionProfiles/Project_Default.xml +0 -0
  20. {xgae-0.1.7 → xgae-0.1.8}/.idea/inspectionProfiles/profiles_settings.xml +0 -0
  21. {xgae-0.1.7 → xgae-0.1.8}/.idea/misc.xml +0 -0
  22. {xgae-0.1.7 → xgae-0.1.8}/.idea/modules.xml +0 -0
  23. {xgae-0.1.7 → xgae-0.1.8}/.idea/vcs.xml +0 -0
  24. {xgae-0.1.7 → xgae-0.1.8}/.idea/xgae.iml +0 -0
  25. {xgae-0.1.7 → xgae-0.1.8}/.python-version +0 -0
  26. {xgae-0.1.7 → xgae-0.1.8}/README.md +0 -0
  27. {xgae-0.1.7 → xgae-0.1.8}/mcpservers/custom_servers.json +0 -0
  28. {xgae-0.1.7 → xgae-0.1.8}/mcpservers/xga_server.json +0 -0
  29. {xgae-0.1.7 → xgae-0.1.8}/mcpservers/xga_server_sse.json +0 -0
  30. {xgae-0.1.7 → xgae-0.1.8}/src/examples/run_human_in_loop.py +0 -0
  31. {xgae-0.1.7 → xgae-0.1.8}/src/examples/run_simple.py +0 -0
  32. {xgae-0.1.7 → xgae-0.1.8}/src/examples/run_user_prompt.py +0 -0
  33. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/__init__.py +0 -0
  34. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/engine/prompt_builder.py +0 -0
  35. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/tools/without_general_tools_app.py +0 -0
  36. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/utils/json_helpers.py +0 -0
  37. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/utils/misc.py +0 -0
  38. {xgae-0.1.7 → xgae-0.1.8}/src/xgae/utils/xml_tool_parser.py +0 -0
  39. {xgae-0.1.7 → xgae-0.1.8}/templates/custom_tool_prompt_template.txt +0 -0
  40. {xgae-0.1.7 → xgae-0.1.8}/templates/example_user_prompt.txt +0 -0
  41. {xgae-0.1.7 → xgae-0.1.8}/templates/gemini_system_prompt_template.txt +0 -0
  42. {xgae-0.1.7 → xgae-0.1.8}/templates/general_tool_prompt_template.txt +0 -0
  43. {xgae-0.1.7 → xgae-0.1.8}/templates/system_prompt_response_sample.txt +0 -0
  44. {xgae-0.1.7 → xgae-0.1.8}/templates/system_prompt_template.txt +0 -0
@@ -1,6 +1,7 @@
1
1
  # LOG
2
2
  LOG_LEVEL=INFO
3
3
  LOG_FILE=log/xgae.log
4
+ LOG_ENABLE=True
4
5
 
5
6
  # LANGFUSE
6
7
  LANGFUSE_PUBLIC_KEY=
@@ -29,6 +29,7 @@
29
29
  "keyToString": {
30
30
  "ModuleVcsDetector.initialDetectionPerformed": "true",
31
31
  "Python.llm_client.executor": "Run",
32
+ "Python.mcp_tool_box.executor": "Run",
32
33
  "Python.message_tools_app.executor": "Run",
33
34
  "Python.responser_base.executor": "Run",
34
35
  "Python.run_engine_with_human_in_loop.executor": "Run",
@@ -38,6 +39,8 @@
38
39
  "Python.run_xga_engine.executor": "Run",
39
40
  "Python.setup_env.executor": "Run",
40
41
  "Python.task_engine.executor": "Run",
42
+ "Python.test_langfuse.executor": "Run",
43
+ "Python.test_litellm_langfuse.executor": "Run",
41
44
  "Python.utils.executor": "Run",
42
45
  "Python.xga_engine.executor": "Run",
43
46
  "Python.xga_mcp_tool_box.executor": "Debug",
@@ -160,7 +163,8 @@
160
163
  <workItem from="1755700523844" duration="24000" />
161
164
  <workItem from="1755737435202" duration="48139000" />
162
165
  <workItem from="1756044658912" duration="1248000" />
163
- <workItem from="1756082326044" duration="23338000" />
166
+ <workItem from="1756082326044" duration="23657000" />
167
+ <workItem from="1756168626188" duration="26778000" />
164
168
  </task>
165
169
  <servers />
166
170
  </component>
@@ -179,18 +183,22 @@
179
183
  </breakpoint-manager>
180
184
  </component>
181
185
  <component name="com.intellij.coverage.CoverageDataManagerImpl">
186
+ <SUITE FILE_PATH="coverage/xgae$test_litellm_langfuse.coverage" NAME="test_litellm_langfuse Coverage Results" MODIFIED="1756196476262" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
182
187
  <SUITE FILE_PATH="coverage/xgae$xga_engine.coverage" NAME="xga_engine Coverage Results" MODIFIED="1755580277172" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
183
188
  <SUITE FILE_PATH="coverage/xgae$run_simple.coverage" NAME="run_simple Coverage Results" MODIFIED="1756111714718" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
184
189
  <SUITE FILE_PATH="coverage/xgae$run_xga_engine.coverage" NAME="run_task_engine Coverage Results" MODIFIED="1756111613459" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
185
190
  <SUITE FILE_PATH="coverage/xgae$message_tools_app.coverage" NAME="message_tools_app Coverage Results" MODIFIED="1756094157566" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
186
191
  <SUITE FILE_PATH="coverage/xgae$run_engine_with_human_in_loop.coverage" NAME="run_engine_with_human_in_loop Coverage Results" MODIFIED="1756089269027" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
187
192
  <SUITE FILE_PATH="coverage/xgae$xga_prompt_builder.coverage" NAME="xga_prompt_builder Coverage Results" MODIFIED="1755587456555" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
193
+ <SUITE FILE_PATH="coverage/xgae$test_langfuse.coverage" NAME="test_langfuse Coverage Results" MODIFIED="1756196410142" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
194
+ <SUITE FILE_PATH="coverage/xgae$run_task_engine.coverage" NAME="run_task_engine Coverage Results" MODIFIED="1756207048901" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
188
195
  <SUITE FILE_PATH="coverage/xgae$responser_base.coverage" NAME="responser_base Coverage Results" MODIFIED="1756103040764" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
196
+ <SUITE FILE_PATH="coverage/xgae$mcp_tool_box.coverage" NAME="mcp_tool_box Coverage Results" MODIFIED="1756188784603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
189
197
  <SUITE FILE_PATH="coverage/xgae$utils.coverage" NAME="utils Coverage Results" MODIFIED="1755226923439" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
190
- <SUITE FILE_PATH="coverage/xgae$setup_env.coverage" NAME="setup_env Coverage Results" MODIFIED="1755657717310" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
198
+ <SUITE FILE_PATH="coverage/xgae$setup_env.coverage" NAME="setup_env Coverage Results" MODIFIED="1756200209483" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
191
199
  <SUITE FILE_PATH="coverage/xgae$run_user_prompt.coverage" NAME="run_user_prompt Coverage Results" MODIFIED="1756089624828" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
192
200
  <SUITE FILE_PATH="coverage/xgae$xga_mcp_tool_box.coverage" NAME="xga_mcp_tool_box Coverage Results" MODIFIED="1755583099719" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
193
- <SUITE FILE_PATH="coverage/xgae$task_engine.coverage" NAME="task_engine Coverage Results" MODIFIED="1756102942600" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
194
- <SUITE FILE_PATH="coverage/xgae$llm_client.coverage" NAME="llm_client Coverage Results" MODIFIED="1756112044142" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
201
+ <SUITE FILE_PATH="coverage/xgae$llm_client.coverage" NAME="llm_client Coverage Results" MODIFIED="1756188100577" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
202
+ <SUITE FILE_PATH="coverage/xgae$task_engine.coverage" NAME="task_engine Coverage Results" MODIFIED="1756199881134" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
195
203
  </component>
196
204
  </project>
xgae-0.1.8/PKG-INFO ADDED
@@ -0,0 +1,11 @@
1
+ Metadata-Version: 2.4
2
+ Name: xgae
3
+ Version: 0.1.8
4
+ Summary: Extreme General Agent Engine
5
+ Requires-Python: >=3.13
6
+ Requires-Dist: colorlog==6.9.0
7
+ Requires-Dist: langchain-mcp-adapters==0.1.9
8
+ Requires-Dist: langfuse==2.60.9
9
+ Requires-Dist: langgraph==0.6.5
10
+ Requires-Dist: litellm==1.74.15
11
+ Requires-Dist: mcp==1.13.0
@@ -1,16 +1,16 @@
1
1
  [project]
2
2
  name = "xgae"
3
- version = "0.1.7"
3
+ version = "0.1.8"
4
4
  description = "Extreme General Agent Engine"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.13"
7
7
  dependencies = [
8
- "colorlog>=6.9.0",
9
- "langchain-mcp-adapters>=0.1.4",
10
- "langgraph>=0.3.21",
11
- "litellm>=1.74.8",
12
- "mcp>=1.12.1",
13
- "langfuse>=2.60.5",
8
+ "colorlog==6.9.0",
9
+ "langchain-mcp-adapters==0.1.9",
10
+ "langgraph==0.6.5",
11
+ "litellm==1.74.15",
12
+ "mcp==1.13.0",
13
+ "langfuse==2.60.9",
14
14
  ]
15
15
 
16
16
  [build-system]
@@ -55,4 +55,4 @@ class XGAToolBox(ABC):
55
55
 
56
56
  @abstractmethod
57
57
  async def call_tool(self, task_id: str, tool_name: str, args: Optional[Dict[str, Any]] = None) -> XGAToolResult:
58
- pass
58
+ pass
@@ -8,7 +8,6 @@ from langchain_mcp_adapters.client import MultiServerMCPClient
8
8
  from langchain_mcp_adapters.tools import load_mcp_tools
9
9
 
10
10
  from xgae.engine.engine_base import XGAError, XGAToolSchema, XGAToolBox, XGAToolResult
11
- from xgae.utils import langfuse
12
11
 
13
12
  class XGAMcpToolBox(XGAToolBox):
14
13
  GENERAL_MCP_SERVER_NAME = "xga_general"
@@ -194,8 +193,8 @@ if __name__ == "__main__":
194
193
 
195
194
  async def main():
196
195
  task_id = "task1"
197
- #mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
198
- mcp_tool_box = XGAMcpToolBox()
196
+ mcp_tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
197
+ #mcp_tool_box = XGAMcpToolBox()
199
198
  await mcp_tool_box.load_mcp_tools_schema()
200
199
  await mcp_tool_box.creat_task_tool_box(task_id=task_id, general_tools=["*"], custom_tools=["bomc_fault.*"])
201
200
  tool_schemas = mcp_tool_box.get_task_tool_schemas(task_id, "general_tool")
@@ -210,9 +209,6 @@ if __name__ == "__main__":
210
209
  print(asdict(tool_schema))
211
210
  print()
212
211
 
213
- result = await mcp_tool_box.call_tool(task_id=task_id, tool_name="web_search", args={"task_id": task_id, "query": "查询天津天气"})
214
- print(f"call web_search result: {result}")
215
-
216
212
  result = await mcp_tool_box.call_tool(task_id=task_id, tool_name="complete", args={"task_id": task_id})
217
213
  print(f"call complete result: {result}")
218
214
 
@@ -3,7 +3,6 @@ import logging
3
3
  from typing import List, Dict, Any, AsyncGenerator, override,Optional
4
4
 
5
5
  from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext, TaskRunContinuousState
6
- from xgae.utils import langfuse
7
6
  from xgae.utils.json_helpers import format_for_yield
8
7
 
9
8
  class NonStreamTaskResponser(TaskResponseProcessor):
@@ -16,6 +15,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
16
15
  llm_content = ""
17
16
  parsed_xml_data = []
18
17
  finish_reason = None
18
+ llm_count = continuous_state.get("auto_continue_count")
19
19
 
20
20
  try:
21
21
  # Extract finish_reason, content, tool calls
@@ -24,7 +24,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
24
24
  finish_reason = llm_response.choices[0].finish_reason
25
25
  logging.info(f"NonStreamTask:LLM response finish_reason={finish_reason}")
26
26
 
27
- langfuse.create_event(trace_context=self.trace_context, name="non_stream_processor_start", level="DEFAULT",
27
+ self.root_span.event(name=f"non_stream_processor_start[{self.task_no}]({llm_count})", level="DEFAULT",
28
28
  status_message=(f"finish_reason={finish_reason}, tool_exec_strategy={self.tool_execution_strategy}"))
29
29
 
30
30
  response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
@@ -90,7 +90,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
90
90
 
91
91
  except Exception as e:
92
92
  logging.error(f"NonStreamTask: Error processing non-streaming response: {llm_content}")
93
- langfuse.create_event(trace_context=self.trace_context, name="error_processing_non_streaming_response", level="ERROR",
93
+ self.root_span.event(name="error_processing_non_streaming_response", level="ERROR",
94
94
  status_message=(f"Error processing non-streaming response: {str(e)}"))
95
95
 
96
96
  content = {"role": "system", "status_type": "error", "message": str(e)}
@@ -100,7 +100,7 @@ class NonStreamTaskResponser(TaskResponseProcessor):
100
100
 
101
101
  # Re-raise the same exception (not a new one) to ensure proper error propagation
102
102
  logging.critical(f"NonStreamTask: Re-raising error to stop further processing: {str(e)}")
103
- langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="CRITICAL",
103
+ self.root_span.event(name="re_raising_error_to_stop_further_processing", level="CRITICAL",
104
104
  status_message=(f"Re-raising error to stop further processing: {str(e)}"))
105
105
  raise # Use bare 'raise' to preserve the original exception with its traceback
106
106
 
@@ -7,8 +7,9 @@ from dataclasses import dataclass
7
7
  from typing import List, Dict, Any, Optional, Tuple, Union, Literal, Callable, TypedDict, AsyncGenerator
8
8
 
9
9
  from xgae.engine.engine_base import XGAToolResult, XGAToolBox
10
- from xgae.utils import langfuse
11
- from xgae.utils.json_helpers import safe_json_parse,format_for_yield
10
+ from xgae.utils.setup_env import langfuse
11
+
12
+ from xgae.utils.json_helpers import safe_json_parse, format_for_yield
12
13
  from xgae.utils.xml_tool_parser import XMLToolParser
13
14
 
14
15
  # Type alias for XML result adding strategy
@@ -21,6 +22,7 @@ class TaskResponserContext(TypedDict, total=False):
21
22
  is_stream: bool
22
23
  task_id: str
23
24
  task_run_id: str
25
+ task_no: int
24
26
  trace_id: str
25
27
  root_span_id: str
26
28
  model_name: str
@@ -56,15 +58,12 @@ class TaskResponseProcessor(ABC):
56
58
 
57
59
  self.task_id = response_context.get("task_id")
58
60
  self.task_run_id = response_context.get("task_run_id")
61
+ self.task_no = response_context.get("task_no")
59
62
  self.tool_execution_strategy = self.response_context.get("tool_execution_strategy", "parallel")
60
63
  self.xml_adding_strategy = self.response_context.get("xml_adding_strategy", "user_message")
61
64
  self.max_xml_tool_calls = self.response_context.get("max_xml_tool_calls", 0)
62
65
 
63
- self.trace_context = {
64
- "trace_id": self.response_context.get("trace_id"),
65
- "parent_span_id": self.response_context.get("root_span_id"),
66
- }
67
-
66
+ self.root_span = langfuse.span(trace_id=response_context.get("trace_id"), id=response_context.get("root_span_id"))
68
67
  self.add_response_message = response_context.get("add_response_msg_func")
69
68
 
70
69
  self.tool_box = response_context.get("tool_box")
@@ -174,7 +173,7 @@ class TaskResponseProcessor(ABC):
174
173
  except Exception as e:
175
174
  logging.error(f"Error extracting XML chunks: {e}")
176
175
  logging.error(f"Content was: {content}")
177
- langfuse.create_event(trace_context=self.trace_context, name="error_extracting_xml_chunks", level="ERROR",
176
+ self.root_span.event(name="error_extracting_xml_chunks", level="ERROR",
178
177
  status_message=(f"Error extracting XML chunks: {e}"), metadata={"content": content})
179
178
 
180
179
  return chunks
@@ -224,7 +223,7 @@ class TaskResponseProcessor(ABC):
224
223
  except Exception as e:
225
224
  logging.error(f"Error parsing XML chunk: {e}")
226
225
  logging.error(f"XML chunk was: {xml_chunk}")
227
- langfuse.create_event(trace_context=self.trace_context, name="error_parsing_xml_chunk", level="ERROR",
226
+ self.root_span.event(name="error_parsing_xml_chunk", level="ERROR",
228
227
  status_message=(f"Error parsing XML chunk: {e}"), metadata={"xml_chunk": xml_chunk})
229
228
  return None
230
229
 
@@ -250,7 +249,7 @@ class TaskResponseProcessor(ABC):
250
249
 
251
250
  except Exception as e:
252
251
  logging.error(f"Error parsing XML tool calls: {e}", exc_info=True)
253
- langfuse.create_event(trace_context=self.trace_context, name="error_parsing_xml_tool_calls", level="ERROR",
252
+ self.root_span.event(name="error_parsing_xml_tool_calls", level="ERROR",
254
253
  status_message=(f"Error parsing XML tool calls: {e}"), metadata={"content": content})
255
254
 
256
255
  return parsed_data
@@ -258,36 +257,35 @@ class TaskResponseProcessor(ABC):
258
257
 
259
258
  async def _execute_tool(self, tool_call: Dict[str, Any]) -> XGAToolResult:
260
259
  """Execute a single tool call and return the result."""
261
- with langfuse.start_as_current_span(trace_context=self.trace_context, name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"]
262
- ) as exec_tool_span:
263
- try:
264
- function_name = tool_call["function_name"]
265
- arguments = tool_call["arguments"]
266
-
267
- logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
268
-
269
- if isinstance(arguments, str):
270
- try:
271
- arguments = safe_json_parse(arguments)
272
- except json.JSONDecodeError:
273
- arguments = {"text": arguments} # @todo modify
274
-
275
- result = None
276
- available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
277
- if function_name in available_tool_names:
278
- result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
279
- else:
280
- logging.error(f"Tool function '{function_name}' not found in registry")
281
- result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
282
- logging.info(f"Tool execution complete: {function_name} -> {result}")
283
- exec_tool_span.update(status_message="tool_executed", output=result)
284
-
285
- return result
286
- except Exception as e:
287
- logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
260
+ exec_tool_span = self.root_span.span(name=f"execute_tool.{tool_call['function_name']}", input=tool_call["arguments"])
261
+ try:
262
+ function_name = tool_call["function_name"]
263
+ arguments = tool_call["arguments"]
264
+
265
+ logging.info(f"Executing tool: {function_name} with arguments: {arguments}")
266
+
267
+ if isinstance(arguments, str):
268
+ try:
269
+ arguments = safe_json_parse(arguments)
270
+ except json.JSONDecodeError:
271
+ arguments = {"text": arguments} # @todo modify
272
+
273
+ result = None
274
+ available_tool_names = self.tool_box.get_task_tool_names(self.task_id)
275
+ if function_name in available_tool_names:
276
+ result = await self.tool_box.call_tool(self.task_id, function_name, arguments)
277
+ else:
278
+ logging.error(f"Tool function '{function_name}' not found in registry")
279
+ result = XGAToolResult(success=False, output=f"Tool function '{function_name}' not found")
280
+ logging.info(f"Tool execution complete: {function_name} -> {result}")
281
+ exec_tool_span.update(status_message="tool_executed", output=result)
282
+
283
+ return result
284
+ except Exception as e:
285
+ logging.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
288
286
 
289
- exec_tool_span.update(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
290
- return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
287
+ exec_tool_span.update(status_message="tool_execution_error", output=f"Error executing tool: {str(e)}", level="ERROR")
288
+ return XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
291
289
 
292
290
  async def _execute_tools(
293
291
  self,
@@ -321,7 +319,7 @@ class TaskResponseProcessor(ABC):
321
319
  return []
322
320
  tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
323
321
  logging.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
324
- langfuse.create_event(trace_context=self.trace_context, name="executing_tools_sequentially", level="DEFAULT",
322
+ self.root_span.event(name="executing_tools_sequentially", level="DEFAULT",
325
323
  status_message=(f"Executing {len(tool_calls)} tools sequentially: {tool_names}"))
326
324
 
327
325
  results = []
@@ -337,19 +335,19 @@ class TaskResponseProcessor(ABC):
337
335
  # Check if this is a terminating tool (ask or complete)
338
336
  if tool_name in ['ask', 'complete']:
339
337
  logging.info(f"Terminating tool '{tool_name}' executed. Stopping further tool execution.")
340
- # langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_executed",
338
+ # self.root_span.event(name="terminating_tool_executed",
341
339
  # level="DEFAULT", status_message=(f"Terminating tool '{tool_name}' executed. Stopping further tool execution."))
342
340
  break # Stop executing remaining tools
343
341
 
344
342
  except Exception as e:
345
343
  logging.error(f"Error executing tool {tool_name}: {str(e)}")
346
- langfuse.create_event(trace_context=self.trace_context, name="error_executing_tool", level="ERROR",
344
+ self.root_span.event(name="error_executing_tool", level="ERROR",
347
345
  status_message=(f"Error executing tool {tool_name}: {str(e)}"))
348
346
  error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(e)}")
349
347
  results.append((tool_call, error_result))
350
348
 
351
349
  logging.info(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)")
352
- # langfuse.create_event(trace_context=self.trace_context, name="sequential_execution_completed", level="DEFAULT",
350
+ # self.root_span.event(name="sequential_execution_completed", level="DEFAULT",
353
351
  # status_message=(f"Sequential execution completed for {len(results)} tools (out of {len(tool_calls)} total)"))
354
352
  return results
355
353
 
@@ -361,7 +359,7 @@ class TaskResponseProcessor(ABC):
361
359
  try:
362
360
  tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
363
361
  logging.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
364
- # langfuse.create_event(trace_context=self.trace_context, name="executing_tools_in_parallel", level="DEFAULT",
362
+ # self.root_span.event(name="executing_tools_in_parallel", level="DEFAULT",
365
363
  # status_message=(f"Executing {len(tool_calls)} tools in parallel: {tool_names}"))
366
364
 
367
365
  # Create tasks for all tool calls
@@ -375,7 +373,7 @@ class TaskResponseProcessor(ABC):
375
373
  for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
376
374
  if isinstance(result, Exception):
377
375
  logging.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
378
- langfuse.create_event(trace_context=self.trace_context, name="error_executing_tool", level="ERROR", status_message=(
376
+ self.root_span.event(name="error_executing_tool", level="ERROR", status_message=(
379
377
  f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}"))
380
378
  # Create error result
381
379
  error_result = XGAToolResult(success=False, output=f"Error executing tool: {str(result)}")
@@ -384,13 +382,13 @@ class TaskResponseProcessor(ABC):
384
382
  processed_results.append((tool_call, result))
385
383
 
386
384
  logging.info(f"Parallel execution completed for {len(tool_calls)} tools")
387
- # langfuse.create_event(trace_context=self.trace_context, name="parallel_execution_completed", level="DEFAULT",
385
+ # self.root_span.event(name="parallel_execution_completed", level="DEFAULT",
388
386
  # status_message=(f"Parallel execution completed for {len(tool_calls)} tools"))
389
387
  return processed_results
390
388
 
391
389
  except Exception as e:
392
390
  logging.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
393
- langfuse.create_event(trace_context=self.trace_context, name="error_in_parallel_tool_execution", level="ERROR",
391
+ self.root_span.event(name="error_in_parallel_tool_execution", level="ERROR",
394
392
  status_message=(f"Error in parallel tool execution: {str(e)}"))
395
393
  # Return error results for all tools if the gather itself fails
396
394
  return [(tool_call, XGAToolResult(success=False, output=f"Execution error: {str(e)}"))
@@ -461,7 +459,7 @@ class TaskResponseProcessor(ABC):
461
459
  return message_obj # Return the modified message object
462
460
  except Exception as e:
463
461
  logging.error(f"Error adding tool result: {str(e)}", exc_info=True)
464
- langfuse.create_event(trace_context=self.trace_context, name="error_adding_tool_result", level="ERROR",
462
+ self.root_span.event(name="error_adding_tool_result", level="ERROR",
465
463
  status_message=(f"Error adding tool result: {str(e)}"),
466
464
  metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
467
465
  "assistant_message_id": assistant_message_id,
@@ -481,7 +479,7 @@ class TaskResponseProcessor(ABC):
481
479
  return message_obj # Return the full message object
482
480
  except Exception as e2:
483
481
  logging.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
484
- langfuse.create_event(trace_context=self.trace_context, name="failed_even_with_fallback_message", level="ERROR",
482
+ self.root_span.event(name="failed_even_with_fallback_message", level="ERROR",
485
483
  status_message=(f"Failed even with fallback message: {str(e2)}"),
486
484
  metadata={"tool_call": tool_call, "result": result, "strategy": strategy,
487
485
  "assistant_message_id": assistant_message_id,
@@ -589,7 +587,7 @@ class TaskResponseProcessor(ABC):
589
587
  if context.function_name in ['ask', 'complete']:
590
588
  metadata["agent_should_terminate"] = "true"
591
589
  logging.info(f"Marking tool status for '{context.function_name}' with termination signal.")
592
- # langfuse.create_event(trace_context=self.trace_context, name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
590
+ # self.root_span.event(name="marking_tool_status_for_termination", level="DEFAULT", status_message=(
593
591
  # f"Marking tool status for '{context.function_name}' with termination signal."))
594
592
  # <<< END ADDED >>>
595
593
 
@@ -7,7 +7,6 @@ from dataclasses import dataclass
7
7
  from datetime import datetime, timezone
8
8
  from typing import List, Dict, Any, Optional, AsyncGenerator, override, Literal
9
9
 
10
- from xgae.utils import langfuse
11
10
  from xgae.engine.responser.responser_base import TaskResponseProcessor, TaskResponserContext,TaskRunContinuousState,XmlAddingStrategy,ToolExecutionStrategy
12
11
  from xgae.utils.json_helpers import (
13
12
  ensure_dict, safe_json_parse,
@@ -205,7 +204,7 @@ class StreamTaskResponser(TaskResponseProcessor):
205
204
  __sequence += 1
206
205
  else:
207
206
  logging.info("XML tool call limit reached - not yielding more content chunks")
208
- langfuse.create_event(trace_context=self.trace_context, name="xml_tool_call_limit_reached", level="DEFAULT", status_message=(
207
+ self.root_span.event(name="xml_tool_call_limit_reached", level="DEFAULT", status_message=(
209
208
  f"XML tool call limit reached - not yielding more content chunks"))
210
209
 
211
210
  # --- Process XML Tool Calls (if enabled and limit not reached) ---
@@ -317,7 +316,7 @@ class StreamTaskResponser(TaskResponseProcessor):
317
316
 
318
317
  if finish_reason == "xml_tool_limit_reached":
319
318
  logging.info("Stopping stream processing after loop due to XML tool call limit")
320
- langfuse.create_event(trace_context=self.trace_context, name="stopping_stream_processing_after_loop_due_to_xml_tool_call_limit",
319
+ self.root_span.event(name="stopping_stream_processing_after_loop_due_to_xml_tool_call_limit",
321
320
  level="DEFAULT", status_message=(
322
321
  f"Stopping stream processing after loop due to XML tool call limit"))
323
322
  break
@@ -352,18 +351,18 @@ class StreamTaskResponser(TaskResponseProcessor):
352
351
  # f"🔥 Estimated tokens – prompt: {prompt_tokens}, "
353
352
  # f"completion: {completion_tokens}, total: {prompt_tokens + completion_tokens}"
354
353
  # )
355
- langfuse.create_event(trace_context=self.trace_context, name="usage_calculated_with_litellm_token_counter", level="DEFAULT",
354
+ self.root_span.event(name="usage_calculated_with_litellm_token_counter", level="DEFAULT",
356
355
  status_message=(f"Usage calculated with litellm.token_counter"))
357
356
  except Exception as e:
358
357
  logging.warning(f"Failed to calculate usage: {str(e)}")
359
- langfuse.create_event(trace_context=self.trace_context, name="failed_to_calculate_usage", level="WARNING",
358
+ self.root_span.event(name="failed_to_calculate_usage", level="WARNING",
360
359
  status_message=(f"Failed to calculate usage: {str(e)}"))
361
360
 
362
361
  # Wait for pending tool executions from streaming phase
363
362
  tool_results_buffer = [] # Stores (tool_call, result, tool_index, context)
364
363
  if pending_tool_executions:
365
364
  logging.info(f"Waiting for {len(pending_tool_executions)} pending streamed tool executions")
366
- langfuse.create_event(trace_context=self.trace_context, name="waiting_for_pending_streamed_tool_executions", level="DEFAULT", status_message=(
365
+ self.root_span.event(name="waiting_for_pending_streamed_tool_executions", level="DEFAULT", status_message=(
367
366
  f"Waiting for {len(pending_tool_executions)} pending streamed tool executions"))
368
367
  # ... (asyncio.wait logic) ...
369
368
  pending_tasks = [execution["task"] for execution in pending_tool_executions]
@@ -387,19 +386,19 @@ class StreamTaskResponser(TaskResponseProcessor):
387
386
  if tool_name in ['ask', 'complete']:
388
387
  logging.info(
389
388
  f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag.")
390
- langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_completed_during_streaming",
389
+ self.root_span.event(name="terminating_tool_completed_during_streaming",
391
390
  level="DEFAULT", status_message=(
392
391
  f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag."))
393
392
  agent_should_terminate = True
394
393
 
395
394
  else: # Should not happen with asyncio.wait
396
395
  logging.warning(f"Task for tool index {tool_idx} not done after wait.")
397
- langfuse.create_event(trace_context=self.trace_context, name="task_for_tool_index_not_done_after_wait", level="WARNING",
396
+ self.root_span.event(name="task_for_tool_index_not_done_after_wait", level="WARNING",
398
397
  status_message=(
399
398
  f"Task for tool index {tool_idx} not done after wait."))
400
399
  except Exception as e:
401
400
  logging.error(f"Error getting result for pending tool execution {tool_idx}: {str(e)}")
402
- langfuse.create_event(trace_context=self.trace_context, name="error_getting_result_for_pending_tool_execution", level="ERROR",
401
+ self.root_span.event(name="error_getting_result_for_pending_tool_execution", level="ERROR",
403
402
  status_message=(
404
403
  f"Error getting result for pending tool execution {tool_idx}: {str(e)}"))
405
404
  context.error = e
@@ -419,7 +418,7 @@ class StreamTaskResponser(TaskResponseProcessor):
419
418
  if tool_name in ['ask', 'complete']:
420
419
  logging.info(
421
420
  f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag.")
422
- langfuse.create_event(trace_context=self.trace_context, name="terminating_tool_completed_during_streaming", level="DEFAULT",
421
+ self.root_span.event(name="terminating_tool_completed_during_streaming", level="DEFAULT",
423
422
  status_message=(
424
423
  f"Terminating tool '{tool_name}' completed during streaming. Setting termination flag."))
425
424
  agent_should_terminate = True
@@ -432,7 +431,7 @@ class StreamTaskResponser(TaskResponseProcessor):
432
431
  except Exception as e:
433
432
  logging.error(
434
433
  f"Error getting result/yielding status for pending tool execution {tool_idx}: {str(e)}")
435
- langfuse.create_event(trace_context=self.trace_context, name="error_getting_result_yielding_status_for_pending_tool_execution",
434
+ self.root_span.event(name="error_getting_result_yielding_status_for_pending_tool_execution",
436
435
  level="ERROR", status_message=(
437
436
  f"Error getting result/yielding status for pending tool execution {tool_idx}: {str(e)}"))
438
437
  context.error = e
@@ -451,7 +450,7 @@ class StreamTaskResponser(TaskResponseProcessor):
451
450
  if finish_msg_obj: yield format_for_yield(finish_msg_obj)
452
451
  logging.info(
453
452
  f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls")
454
- langfuse.create_event(trace_context=self.trace_context, name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls",
453
+ self.root_span.event(name="stream_finished_with_reason_xml_tool_limit_reached_after_xml_tool_calls",
455
454
  level="DEFAULT", status_message=(
456
455
  f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls"))
457
456
 
@@ -501,7 +500,7 @@ class StreamTaskResponser(TaskResponseProcessor):
501
500
  yield format_for_yield(yield_message)
502
501
  else:
503
502
  logging.error(f"Failed to save final assistant message for thread {thread_id}")
504
- langfuse.create_event(trace_context=self.trace_context, name="failed_to_save_final_assistant_message_for_thread", level="ERROR",
503
+ self.root_span.event(name="failed_to_save_final_assistant_message_for_thread", level="ERROR",
505
504
  status_message=(f"Failed to save final assistant message for thread {thread_id}"))
506
505
  # Save and yield an error status
507
506
  err_content = {"role": "system", "status_type": "error",
@@ -569,7 +568,7 @@ class StreamTaskResponser(TaskResponseProcessor):
569
568
  # Populate from buffer if executed on stream
570
569
  if config.execute_on_stream and tool_results_buffer:
571
570
  logging.info(f"Processing {len(tool_results_buffer)} buffered tool results")
572
- langfuse.create_event(trace_context=self.trace_context, name="processing_buffered_tool_results", level="DEFAULT",
571
+ self.root_span.event(name="processing_buffered_tool_results", level="DEFAULT",
573
572
  status_message=(f"Processing {len(tool_results_buffer)} buffered tool results"))
574
573
  for tool_call, result, tool_idx, context in tool_results_buffer:
575
574
  if last_assistant_message_object: context.assistant_message_id = last_assistant_message_object[
@@ -580,7 +579,7 @@ class StreamTaskResponser(TaskResponseProcessor):
580
579
  elif final_tool_calls_to_process and not config.execute_on_stream:
581
580
  logging.info(
582
581
  f"Executing {len(final_tool_calls_to_process)} tools ({config.tool_execution_strategy}) after stream")
583
- langfuse.create_event(trace_context=self.trace_context, name="executing_tools_after_stream", level="DEFAULT", status_message=(
582
+ self.root_span.event(name="executing_tools_after_stream", level="DEFAULT", status_message=(
584
583
  f"Executing {len(final_tool_calls_to_process)} tools ({config.tool_execution_strategy}) after stream"))
585
584
  results_list = await self._execute_tools(final_tool_calls_to_process,
586
585
  config.tool_execution_strategy)
@@ -598,14 +597,14 @@ class StreamTaskResponser(TaskResponseProcessor):
598
597
  tool_results_map[current_tool_idx] = (tc, res, context)
599
598
  else:
600
599
  logging.warning(f"Could not map result for tool index {current_tool_idx}")
601
- langfuse.create_event(trace_context=self.trace_context, name="could_not_map_result_for_tool_index", level="WARNING",
600
+ self.root_span.event(name="could_not_map_result_for_tool_index", level="WARNING",
602
601
  status_message=(f"Could not map result for tool index {current_tool_idx}"))
603
602
  current_tool_idx += 1
604
603
 
605
604
  # Save and Yield each result message
606
605
  if tool_results_map:
607
606
  logging.info(f"Saving and yielding {len(tool_results_map)} final tool result messages")
608
- langfuse.create_event(trace_context=self.trace_context, name="saving_and_yielding_final_tool_result_messages", level="DEFAULT",
607
+ self.root_span.event(name="saving_and_yielding_final_tool_result_messages", level="DEFAULT",
609
608
  status_message=(
610
609
  f"Saving and yielding {len(tool_results_map)} final tool result messages"))
611
610
  for tool_idx in sorted(tool_results_map.keys()):
@@ -640,7 +639,7 @@ class StreamTaskResponser(TaskResponseProcessor):
640
639
  else:
641
640
  logging.error(
642
641
  f"Failed to save tool result for index {tool_idx}, not yielding result message.")
643
- langfuse.create_event(trace_context=self.trace_context, name="failed_to_save_tool_result_for_index", level="ERROR",
642
+ self.root_span.event(name="failed_to_save_tool_result_for_index", level="ERROR",
644
643
  status_message=(
645
644
  f"Failed to save tool result for index {tool_idx}, not yielding result message."))
646
645
  # Optionally yield error status for saving failure?
@@ -658,7 +657,7 @@ class StreamTaskResponser(TaskResponseProcessor):
658
657
  if agent_should_terminate:
659
658
  logging.info(
660
659
  "Agent termination requested after executing ask/complete tool. Stopping further processing.")
661
- langfuse.create_event(trace_context=self.trace_context, name="agent_termination_requested", level="DEFAULT",
660
+ self.root_span.event(name="agent_termination_requested", level="DEFAULT",
662
661
  status_message="Agent termination requested after executing ask/complete tool. Stopping further processing.")
663
662
 
664
663
  # Set finish reason to indicate termination
@@ -719,7 +718,7 @@ class StreamTaskResponser(TaskResponseProcessor):
719
718
  logging.info("Assistant response end saved for stream (before termination)")
720
719
  except Exception as e:
721
720
  logging.error(f"Error saving assistant response end for stream (before termination): {str(e)}")
722
- langfuse.create_event(trace_context=self.trace_context, name="error_saving_assistant_response_end_for_stream_before_termination",
721
+ self.root_span.event(name="error_saving_assistant_response_end_for_stream_before_termination",
723
722
  level="ERROR", status_message=(
724
723
  f"Error saving assistant response end for stream (before termination): {str(e)}"))
725
724
 
@@ -775,12 +774,12 @@ class StreamTaskResponser(TaskResponseProcessor):
775
774
  logging.info("Assistant response end saved for stream")
776
775
  except Exception as e:
777
776
  logging.error(f"Error saving assistant response end for stream: {str(e)}")
778
- langfuse.create_event(trace_context=self.trace_context, name="error_saving_assistant_response_end_for_stream", level="ERROR",
777
+ self.root_span.event(name="error_saving_assistant_response_end_for_stream", level="ERROR",
779
778
  status_message=(f"Error saving assistant response end for stream: {str(e)}"))
780
779
 
781
780
  except Exception as e:
782
781
  logging.error(f"Error processing stream: {str(e)}", exc_info=True)
783
- langfuse.create_event(trace_context=self.trace_context, name="error_processing_stream", level="ERROR",
782
+ self.root_span.event(name="error_processing_stream", level="ERROR",
784
783
  status_message=(f"Error processing stream: {str(e)}"))
785
784
  # Save and yield error status message
786
785
 
@@ -794,12 +793,12 @@ class StreamTaskResponser(TaskResponseProcessor):
794
793
  if err_msg_obj: yield format_for_yield(err_msg_obj) # Yield the saved error message
795
794
  # Re-raise the same exception (not a new one) to ensure proper error propagation
796
795
  logging.critical(f"Re-raising error to stop further processing: {str(e)}")
797
- langfuse.create_event(trace_context=self.trace_context, name="re_raising_error_to_stop_further_processing", level="ERROR",
796
+ self.root_span.event(name="re_raising_error_to_stop_further_processing", level="ERROR",
798
797
  status_message=(f"Re-raising error to stop further processing: {str(e)}"))
799
798
  else:
800
799
  logging.error(f"AnthropicException - Overloaded detected - Falling back to OpenRouter: {str(e)}",
801
800
  exc_info=True)
802
- langfuse.create_event(trace_context=self.trace_context, name="anthropic_exception_overloaded_detected", level="ERROR", status_message=(
801
+ self.root_span.event(name="anthropic_exception_overloaded_detected", level="ERROR", status_message=(
803
802
  f"AnthropicException - Overloaded detected - Falling back to OpenRouter: {str(e)}"))
804
803
  raise # Use bare 'raise' to preserve the original exception with its traceback
805
804
 
@@ -822,5 +821,5 @@ class StreamTaskResponser(TaskResponseProcessor):
822
821
  if end_msg_obj: yield format_for_yield(end_msg_obj)
823
822
  except Exception as final_e:
824
823
  logging.error(f"Error in finally block: {str(final_e)}", exc_info=True)
825
- langfuse.create_event(trace_context=self.trace_context, name="error_in_finally_block", level="ERROR",
824
+ self.root_span.event(name="error_in_finally_block", level="ERROR",
826
825
  status_message=(f"Error in finally block: {str(final_e)}"))