xgae 0.1.9__tar.gz → 0.1.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of xgae might be problematic.
- {xgae-0.1.9 → xgae-0.1.10}/.env +1 -1
- xgae-0.1.10/.idea/ai_toolkit.xml +6 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/workspace.xml +14 -5
- {xgae-0.1.9 → xgae-0.1.10}/PKG-INFO +1 -1
- {xgae-0.1.9 → xgae-0.1.10}/pyproject.toml +2 -1
- xgae-0.1.10/src/examples/agent/langgraph/react/react_agent.py +177 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/examples/engine/run_human_in_loop.py +5 -3
- {xgae-0.1.9 → xgae-0.1.10}/src/examples/engine/run_simple.py +6 -2
- {xgae-0.1.9 → xgae-0.1.10}/src/examples/engine/run_user_prompt.py +4 -1
- xgae-0.1.10/src/xgae/__init__.py +4 -0
- xgae-0.1.10/src/xgae/cli_app.py +87 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/responser/non_stream_responser.py +3 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/task_engine.py +23 -8
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/task_langfuse.py +8 -6
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/__init__.py +1 -1
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/llm_client.py +21 -13
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/setup_env.py +3 -3
- {xgae-0.1.9 → xgae-0.1.10}/test/test_langfuse.py +2 -1
- {xgae-0.1.9 → xgae-0.1.10}/uv.lock +1 -1
- xgae-0.1.9/src/xgae/__init__.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/.gitignore +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/inspectionProfiles/Project_Default.xml +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/inspectionProfiles/profiles_settings.xml +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/misc.xml +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/modules.xml +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/vcs.xml +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.idea/xgae.iml +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/.python-version +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/README.md +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/mcpservers/custom_servers.json +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/mcpservers/xga_server.json +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/mcpservers/xga_server_sse.json +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/examples/tools/custom_fault_tools_app.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/engine_base.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/mcp_tool_box.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/prompt_builder.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/responser/responser_base.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/responser/stream_responser.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/tools/without_general_tools_app.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/json_helpers.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/misc.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/xml_tool_parser.py +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/templates/custom_tool_prompt_template.txt +0 -0
- /xgae-0.1.9/templates/example_user_prompt.txt → /xgae-0.1.10/templates/example/fault_user_prompt.txt +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/templates/gemini_system_prompt_template.txt +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/templates/general_tool_prompt_template.txt +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/templates/system_prompt_response_sample.txt +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/templates/system_prompt_template.txt +0 -0
- {xgae-0.1.9 → xgae-0.1.10}/test/test_litellm_langfuse.py +0 -0
{xgae-0.1.9 → xgae-0.1.10}/.env
RENAMED
@@ -10,12 +10,12 @@ LANGFUSE_HOST=https://cloud.langfuse.com
 
 # LLM
 LLM_MODEL=openai/qwen3-235b-a22b
-#LLM_MODEL=openai/qwen-plus
 LLM_API_BASE=https://dashscope.aliyuncs.com/compatible-mode/v1
 LLM_API_KEY=
 LLM_MAX_TOKENS=16384
 LLM_TEMPERATURE=0.7
 LLM_MAX_RETRIES=2
+LLM_LANGFUSE_ENABLE=False
 
 # TASK
 MAX_AUTO_RUN = 15
{xgae-0.1.9 → xgae-0.1.10}/.idea/workspace.xml
@@ -28,6 +28,7 @@
   <component name="PropertiesComponent"><![CDATA[{
   "keyToString": {
     "ModuleVcsDetector.initialDetectionPerformed": "true",
+    "Python.__init__.executor": "Run",
     "Python.llm_client.executor": "Run",
     "Python.mcp_tool_box.executor": "Run",
     "Python.message_tools_app.executor": "Run",
@@ -40,12 +41,14 @@
     "Python.run_xga_engine.executor": "Run",
     "Python.setup_env.executor": "Run",
     "Python.task_engine.executor": "Run",
+    "Python.task_langfuse.executor": "Debug",
     "Python.test_langfuse.executor": "Run",
     "Python.test_litellm_langfuse.executor": "Run",
     "Python.utils.executor": "Run",
     "Python.xga_engine.executor": "Run",
     "Python.xga_mcp_tool_box.executor": "Debug",
     "Python.xga_prompt_builder.executor": "Debug",
+    "Python.xgae_cli.executor": "Run",
     "RunOnceActivity.ShowReadmeOnStart": "true",
     "last_opened_file_path": "/Users/sharkystar/DevProjects/xga/xgae",
     "node.js.detected.package.eslint": "true",
@@ -59,6 +62,7 @@
   }]]></component>
   <component name="RecentsManager">
     <key name="MoveFile.RECENT_KEYS">
+      <recent name="$PROJECT_DIR$/src/examples" />
      <recent name="$PROJECT_DIR$/src/examples/engine" />
      <recent name="$PROJECT_DIR$/src/xgae/engine/responser" />
    </key>
@@ -166,7 +170,9 @@
      <workItem from="1755737435202" duration="48139000" />
      <workItem from="1756044658912" duration="1248000" />
      <workItem from="1756082326044" duration="23657000" />
-      <workItem from="1756168626188" duration="
+      <workItem from="1756168626188" duration="52435000" />
+      <workItem from="1756305726553" duration="2819000" />
+      <workItem from="1756340066915" duration="14276000" />
    </task>
    <servers />
  </component>
@@ -186,22 +192,25 @@
  </component>
  <component name="com.intellij.coverage.CoverageDataManagerImpl">
    <SUITE FILE_PATH="coverage/xgae$test_litellm_langfuse.coverage" NAME="test_litellm_langfuse Coverage Results" MODIFIED="1756196476262" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/xgae$task_langfuse.coverage" NAME="task_langfuse Coverage Results" MODIFIED="1756306181167" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$xga_engine.coverage" NAME="xga_engine Coverage Results" MODIFIED="1755580277172" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
-    <SUITE FILE_PATH="coverage/xgae$run_simple.coverage" NAME="run_simple Coverage Results" MODIFIED="
+    <SUITE FILE_PATH="coverage/xgae$run_simple.coverage" NAME="run_simple Coverage Results" MODIFIED="1756359842556" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$run_human_in_loop.coverage" NAME="run_human_in_loop Coverage Results" MODIFIED="1756279131815" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$run_xga_engine.coverage" NAME="run_task_engine Coverage Results" MODIFIED="1756111613459" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$message_tools_app.coverage" NAME="message_tools_app Coverage Results" MODIFIED="1756094157566" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$run_engine_with_human_in_loop.coverage" NAME="run_engine_with_human_in_loop Coverage Results" MODIFIED="1756089269027" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/xgae$__init__.coverage" NAME="__init__ Coverage Results" MODIFIED="1756345820610" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$xga_prompt_builder.coverage" NAME="xga_prompt_builder Coverage Results" MODIFIED="1755587456555" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$test_langfuse.coverage" NAME="test_langfuse Coverage Results" MODIFIED="1756196410142" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
-    <SUITE FILE_PATH="coverage/xgae$run_task_engine.coverage" NAME="run_task_engine Coverage Results" MODIFIED="
+    <SUITE FILE_PATH="coverage/xgae$run_task_engine.coverage" NAME="run_task_engine Coverage Results" MODIFIED="1756341371676" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$responser_base.coverage" NAME="responser_base Coverage Results" MODIFIED="1756103040764" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$mcp_tool_box.coverage" NAME="mcp_tool_box Coverage Results" MODIFIED="1756274403389" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$utils.coverage" NAME="utils Coverage Results" MODIFIED="1755226923439" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$setup_env.coverage" NAME="setup_env Coverage Results" MODIFIED="1756273791782" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$run_user_prompt.coverage" NAME="run_user_prompt Coverage Results" MODIFIED="1756279512361" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/xgae$xgae_cli.coverage" NAME="xgae_cli Coverage Results" MODIFIED="1756347000850" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
    <SUITE FILE_PATH="coverage/xgae$xga_mcp_tool_box.coverage" NAME="xga_mcp_tool_box Coverage Results" MODIFIED="1755583099719" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
-    <SUITE FILE_PATH="coverage/xgae$
-    <SUITE FILE_PATH="coverage/xgae$
+    <SUITE FILE_PATH="coverage/xgae$task_engine.coverage" NAME="task_engine Coverage Results" MODIFIED="1756308226461" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
+    <SUITE FILE_PATH="coverage/xgae$llm_client.coverage" NAME="llm_client Coverage Results" MODIFIED="1756308364265" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
  </component>
</project>
{xgae-0.1.9 → xgae-0.1.10}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "xgae"
-version = "0.1.
+version = "0.1.10"
 description = "Extreme General Agent Engine"
 readme = "README.md"
 requires-python = ">=3.13"
@@ -21,5 +21,6 @@ build-backend = "hatchling.build"
 exclude = ["log/*"]
 
 [project.scripts]
+xgae = "xgae.cli_app:main"
 xgae-tools = "xgae.tools.without_general_tools_app:main"
 custom_fault_tools = "examples.tools.custom_fault_tools_app:main"
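The new `[project.scripts]` entry registers a top-level `xgae` command that resolves to `xgae.cli_app:main` (the new CLI module added below). A rough stand-in for what the generated console script does once the package is installed — shown only as an illustration, since the real launcher is produced by the installer:

# Illustrative equivalent of the generated "xgae" console script (not part of the package).
from xgae.cli_app import main

if __name__ == "__main__":
    main()  # cli_app.main() wraps asyncio.run(cli()) and starts the interactive loop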
xgae-0.1.10/src/examples/agent/langgraph/react/react_agent.py
@@ -0,0 +1,177 @@
+import asyncio
+import logging
+from typing import Any, Dict, List, Annotated, Sequence, TypedDict
+
+from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.graph import END, START, StateGraph
+from langgraph.graph.message import add_messages
+
+from xgae.engine.engine_base import XGATaskResult, XGAResponseMessage
+from xgae.engine.mcp_tool_box import XGAMcpToolBox
+from xgae.utils.setup_env import setup_langfuse, setup_logging
+from xgae.utils import handle_error
+from xgae.utils.misc import read_file
+
+class TaskState(TypedDict, total=False):
+    """State definition for the agent orchestration graph"""
+    messages: Annotated[Sequence[BaseMessage], add_messages]
+    user_input: str
+    next_node: str
+    context: Dict[str, Any]
+    system_prompt: str
+    custom_tools: List[str]
+    general_tools: List[str]
+    task_result: XGATaskResult
+    formatted_result: XGATaskResult
+    iteration_count: int
+
+langfuse = setup_langfuse()
+
+class XGAReactAgent:
+    MAX_TASK_RETRY = 2
+    def __init__(self):
+        self.tool_box = XGAMcpToolBox()
+
+    async def _create_graph(self) -> StateGraph:
+        try:
+            graph_builder = StateGraph(TaskState)
+
+            # Add nodes
+            graph_builder.add_node("supervisor", self._supervisor_node)
+            graph_builder.add_node("select_tool", self._select_tool_node)
+            graph_builder.add_node("exec_task", self._exec_task_node)
+            graph_builder.add_node("eval_result", self._eval_result_node)
+            graph_builder.add_node("format_result", self._format_result_node)
+
+            # Add edges
+            graph_builder.add_edge(START, "supervisor")
+            graph_builder.add_conditional_edges(
+                "supervisor",
+                self._next_condition,
+                {
+                    "select_tool": "select_tool",
+                    "exec_task": "exec_task",
+                    "format_result": "format_result"
+                }
+            )
+
+            graph_builder.add_edge("select_tool", "exec_task")
+            graph_builder.add_edge("exec_task", "eval_result")
+
+            graph_builder.add_conditional_edges(
+                "eval_result",
+                self._next_condition,
+                {
+                    "retry": "supervisor",
+                    "format_result": "format_result",
+                }
+            )
+
+            graph_builder.add_edge("format_result", END)
+
+            graph = graph_builder.compile(checkpointer=MemorySaver())
+            graph.name = "XGARectAgent"
+
+            return graph
+        except Exception as e:
+            logging.error("Failed to create XGARectAgent graph: %s", str(e))
+            raise
+
+
+    async def _supervisor_node(self, state: TaskState) -> Dict[str, Any]:
+        user_input = state.get("user_input", "")
+        system_prompt = None if "fault" in user_input else read_file("templates/example/fault_user_prompt.txt")
+        return {
+            "system_prompt" : system_prompt,
+            "next_node" : "select_tool",
+        }
+
+    async def _select_tool_node(self, state: TaskState) -> Dict[str, Any]:
+        system_prompt = state.get("system_prompt",None)
+        general_tools = ["*"] if system_prompt else []
+        custom_tools = ["*"] if not system_prompt else []
+        return {
+            "general_tools" : general_tools,
+            "custom_tools" : custom_tools,
+        }
+
+    async def _exec_task_node(self, state: TaskState) -> Dict[str, Any]:
+        task_result = XGATaskResult(type="answer", content="test task result")
+        return {
+            "task_result" : task_result
+        }
+
+    async def _eval_result_node(self, state: TaskState) -> Dict[str, Any]:
+        next_node = "end"
+        return {
+            "next_node" : next_node
+        }
+
+    async def _format_result_node(self, state: TaskState) -> Dict[str, Any]:
+        formatted_result = state.get("task_result")
+        return {
+            "formatted_result" : formatted_result,
+            "messages": state["messages"] + [AIMessage(content=f"")]
+        }
+
+    def _next_condition(self, state: TaskState) -> str:
+        next_node = state.get("next_node")
+        return next_node
+
+
+    async def generate(self, user_input: str) -> XGATaskResult:
+        result = None
+        try:
+            logging.info("****** Start React Agent for user_input: %s", user_input)
+
+            # Create graph if not already created
+            if self.graph is None:
+                self.graph = await self._create_graph()
+
+            # Initialize state
+            initial_state = {
+                "messages": [HumanMessage(content=f"information for: {user_input}")],
+                "user_input": user_input,
+                "next_node": None,
+                "tasks": [],
+                "context": "",
+                "current_task": None,
+                "next_task": None,
+                "formatted_result": "",
+                "final_error_info": "",
+                "iteration_count": 1
+            }
+
+            # Run the retrieval graph with proper configuration
+            config = {"recursion_limit": 100,
+                      "configurable": {"thread_id": "manager_async_generate_thread"}}
+            final_state = await self.graph.ainvoke(initial_state, config=config)
+
+            # Parse and return formatted results
+            result = final_state["formatted_result"]
+
+            logging.info("=" * 100)
+            logging.info("User question: %s", user_input)
+            logging.info("User answer: %s", result)
+            logging.info("=" * 100)
+
+            return result
+        except Exception as e:
+            logging.error("### Error ManagerAgent _agent_work for user_input '%s': %s ###", user_input, str(e))
+            handle_error(e)
+            result = XGATaskResult(type="error", content="Never get result, Unexpected Error")
+            return result
+
+
+if __name__ == "__main__":
+    setup_logging()
+
+    agent = XGAReactAgent()
+    user_inputs = [
+        "Create a function to sort a list of numbers, sort [6,8,7,5]"
+        , "sort [3,2,7,5]"
+    ]
+    for user_input in user_inputs:
+        result = agent.generate(user_input)
+        print(result)
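react_agent.py is a LangGraph skeleton: the exec/eval nodes return canned results, generate() reads self.graph before __init__ ever assigns it, and the __main__ block calls the async generate() without an event loop, so as released it prints coroutine objects rather than results. A minimal driver sketch, assuming the caller seeds the graph attribute and that the module is importable from the src/examples layout (both assumptions, not part of the package):

# Hedged driver sketch for XGAReactAgent; the import path and the manual
# agent.graph seeding are assumptions about the 0.1.10 layout.
import asyncio

from examples.agent.langgraph.react.react_agent import XGAReactAgent  # path assumed

async def run_once(question: str):
    agent = XGAReactAgent()
    agent.graph = None                     # generate() lazily builds the graph on first call
    return await agent.generate(question)  # awaited, unlike the module's __main__ block

if __name__ == "__main__":
    print(asyncio.run(run_once("sort [3,2,7,5]")))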
{xgae-0.1.9 → xgae-0.1.10}/src/examples/engine/run_human_in_loop.py
@@ -5,12 +5,15 @@ from xgae.engine.task_engine import XGATaskEngine
 from xgae.utils.llm_client import LLMConfig
 from xgae.utils.misc import read_file
 
-from xgae.utils.setup_env import setup_langfuse
+from xgae.utils.setup_env import setup_langfuse, setup_logging
+
+setup_logging()
+langfuse = setup_langfuse()
 
 async def main() -> None:
     # Before Run Exec: uv run custom_fault_tools
     tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-    system_prompt = read_file("templates/
+    system_prompt = read_file("templates/example/fault_user_prompt.txt")
 
     engine = XGATaskEngine(tool_box=tool_box,
                            general_tools=[],
@@ -19,7 +22,6 @@ async def main() -> None:
                            system_prompt=system_prompt,
                            max_auto_run=8)
 
-    langfuse = setup_langfuse()
     # Two task run in same langfuse trace
     trace_id = langfuse.trace(name="xgae_example_run_human_in_loop").trace_id
 
{xgae-0.1.9 → xgae-0.1.10}/src/examples/engine/run_simple.py
@@ -3,14 +3,18 @@ import asyncio
 from xgae.engine.task_engine import XGATaskEngine
 from xgae.utils.llm_client import LLMConfig
 
+from xgae.utils.setup_env import setup_logging
+
+setup_logging()
 
 async def main() -> None:
-    engine = XGATaskEngine(llm_config=LLMConfig(stream=False), max_auto_run=
+    engine = XGATaskEngine(llm_config=LLMConfig(stream=False), max_auto_run=3)
 
     final_result = await engine.run_task_with_final_answer(
-        task_message={"role": "user", "content": "1+
+        task_message={"role": "user", "content": "1+7"}
    )
 
    print("FINAL RESULT:", final_result)
 
+
 asyncio.run(main())
|
|
|
5
5
|
from xgae.utils.llm_client import LLMConfig
|
|
6
6
|
from xgae.utils.misc import read_file
|
|
7
7
|
|
|
8
|
+
from xgae.utils.setup_env import setup_logging
|
|
9
|
+
|
|
10
|
+
setup_logging()
|
|
8
11
|
|
|
9
12
|
async def main() -> None:
|
|
10
13
|
# Before Run Exec: uv run custom_fault_tools
|
|
11
14
|
tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
|
|
12
|
-
system_prompt = read_file("templates/
|
|
15
|
+
system_prompt = read_file("templates/example/fault_user_prompt.txt")
|
|
13
16
|
|
|
14
17
|
engine = XGATaskEngine(tool_box=tool_box,
|
|
15
18
|
general_tools=[],
|
|
xgae-0.1.10/src/xgae/cli_app.py
@@ -0,0 +1,87 @@
+import asyncio
+import sys
+
+from xgae.engine.mcp_tool_box import XGAMcpToolBox
+from xgae.engine.task_engine import XGATaskEngine
+from xgae.utils.llm_client import LLMConfig
+from xgae.utils.misc import read_file
+
+from xgae.utils.setup_env import setup_langfuse, setup_env_logging
+
+setup_env_logging()
+langfuse = setup_langfuse()
+
+def get_user_message(question)-> str:
+    while True:
+        user_message = input(f"\n💬 {question}: ")
+        if user_message.lower() == 'exit':
+            print("\n====== Extreme General Agent Engine CLI EXIT ======")
+            sys.exit()
+
+        if not user_message.strip():
+            print("\nuser message is empty, input agin !!!\n")
+            continue
+
+        return user_message
+
+async def cli() -> None:
+    await asyncio.sleep(1)
+    print("\n====== Extreme General Agent Engine CLI START ======")
+    user_message = input("\n💬 Start Custom MCP Server and Load User Prompt (Yes/No): ")
+    tool_box = None
+    system_prompt = None
+    general_tools = []
+    custom_tools = []
+    if user_message.lower() == 'yes':
+        print(f"--- Start Custom MCP Server in custom_servers.json")
+        print(f"--- Load User Prompt in example/fault_user_prompt.txt")
+        tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+        system_prompt = read_file("templates/example/fault_user_prompt.txt")
+        custom_tools = ["*"]
+    else:
+        print(f"--- Start General Agent Server")
+        print(f"--- Load System Prompt")
+        general_tools = ["*"]
+
+    while True:
+        user_message = get_user_message("Enter your message (or 'exit' to quit)")
+
+        print("\n🔄 Running XGA Engine ...\n")
+        engine = XGATaskEngine(tool_box=tool_box,
+                               general_tools=general_tools,
+                               custom_tools=custom_tools,
+                               llm_config=LLMConfig(stream=False),
+                               system_prompt=system_prompt,
+                               max_auto_run=8)
+
+        # Two task run in same langfuse trace
+        trace_id = langfuse.trace(name="xgae_cli").trace_id
+
+        final_result = await engine.run_task_with_final_answer(
+            task_message={"role": "user", "content": user_message},
+            trace_id=trace_id
+        )
+
+        if final_result["type"] == "ask":
+            await asyncio.sleep(1)
+            print(f"\n📌 ASK INFO: {final_result['content']}")
+            user_message = get_user_message("Enter ASK information (or 'exit' to quit)")
+            final_result = await engine.run_task_with_final_answer(
+                task_message={"role": "user", "content": user_message},
+                trace_id=trace_id
+            )
+
+        await asyncio.sleep(1)
+        result_prefix = "✅" if final_result["type"] == "answer" else "❌"
+        if final_result["type"] == "ask":
+            print("\n *** IMPORTANT: XGA CLI only support showing ONE TURN ASK !")
+            result_prefix = "⚠️"
+        print(f"\n {result_prefix} FINAL RESULT: {final_result['content']}")
+
+
+def main():
+    asyncio.run(cli())
+
+
+if __name__ == "__main__":
+    main()
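cli_app.py drives XGATaskEngine interactively and reuses one Langfuse trace for the initial task plus a single follow-up "ask" turn. The same flow can be exercised non-interactively; a sketch under the assumption that a one-shot question and a canned follow-up answer are enough (the follow_up text and function name are illustrative, every API call mirrors what cli_app itself does):

# Non-interactive sketch of the ask-then-answer flow in cli_app.cli().
import asyncio

from xgae.engine.task_engine import XGATaskEngine
from xgae.utils.llm_client import LLMConfig
from xgae.utils.setup_env import setup_langfuse

async def one_shot(question: str, follow_up: str = "use default settings"):
    langfuse = setup_langfuse()
    engine = XGATaskEngine(tool_box=None,
                           general_tools=["*"],
                           custom_tools=[],
                           llm_config=LLMConfig(stream=False),
                           system_prompt=None,
                           max_auto_run=8)
    trace_id = langfuse.trace(name="xgae_cli_sketch").trace_id  # both turns share one trace

    result = await engine.run_task_with_final_answer(
        task_message={"role": "user", "content": question}, trace_id=trace_id)
    if result["type"] == "ask":
        # Answer the engine's clarification request in the same trace.
        result = await engine.run_task_with_final_answer(
            task_message={"role": "user", "content": follow_up}, trace_id=trace_id)
    return result

if __name__ == "__main__":
    print(asyncio.run(one_shot("1+7")))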
{xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/responser/non_stream_responser.py
@@ -82,6 +82,9 @@ class NonStreamTaskResponser(TaskResponseProcessor):
                     finish_reason = "completed"
                     break
                 tool_index += 1
+        else:
+            finish_reason = "non_tool_call"
+            logging.warning(f"NonStreamTask: tool_calls is empty, No Tool need to call !")
 
         if finish_reason:
             finish_content = {"status_type": "finish", "finish_reason": finish_reason}
{xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/task_engine.py
@@ -117,7 +117,7 @@ class XGATaskEngine:
 
         self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
 
-        logging.info("*" *
+        logging.info("*" * 10 + f" XGATaskEngine Task'{self.task_id}' Initialized " + "*" * 10)
         logging.info(f"model_name={self.model_name}, is_stream={self.is_stream}")
         logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
 
@@ -159,6 +159,10 @@ class XGATaskEngine:
                 logging.warning(f"run_task_auto: Detected finish_reason='xml_tool_limit_reached', stop auto-continue")
                 auto_continue = False
                 break
+            elif finish_reason == 'non_tool_call':
+                logging.warning(f"run_task_auto: Detected finish_reason='non_tool_call', stop auto-continue")
+                auto_continue = False
+                break
             elif finish_reason == 'stop' or finish_reason == 'length': # 'length' never occur
                 auto_continue = True
                 auto_continue_count += 1
@@ -191,14 +195,14 @@
         }
         llm_messages.append(temp_assistant_message)
 
-
-        langfuse_metadata = self.task_langfuse.create_llm_langfuse_meta(
+        auto_count = continuous_state.get("auto_continue_count")
+        langfuse_metadata = self.task_langfuse.create_llm_langfuse_meta(auto_count)
 
         llm_response = await self.llm_client.create_completion(llm_messages, langfuse_metadata)
         response_processor = self._create_response_processer()
 
         async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
-            self._logging_reponse_chunk(chunk)
+            self._logging_reponse_chunk(chunk, auto_count)
             yield chunk
 
     def _parse_final_result(self, chunks: List[Dict[str, Any]]) -> XGATaskResult:
@@ -238,13 +242,21 @@
                     result_type = "answer" if success else "error"
                     result_content = f"Task execute '{tool_name}' {result_type}: {output}"
                     final_result = XGATaskResult(type=result_type, content=result_content)
-                elif chunk_type == "assistant_complete" and finish_reason == '
+                elif chunk_type == "assistant_complete" and finish_reason == 'non_tool_call':
                     assis_content = chunk.get('content', {})
                     result_content = assis_content.get("content", "LLM output is empty")
                     final_result = XGATaskResult(type="answer", content=result_content)
 
                 if final_result is not None:
                     break
+
+            if final_result and finish_reason == "completed":
+                logging.info(f"✅ FINAL_RESULT: finish_reason={finish_reason}, final_result={final_result}")
+            elif final_result is not None:
+                logging.warning(f"⚠️ FINAL_RESULT: finish_reason={finish_reason}, final_result={final_result}")
+            else:
+                logging.warning(f"❌ FINAL_RESULT: LLM Result is EMPTY, finish_reason={finish_reason}")
+                final_result = XGATaskResult(type="error", content="LLM has no answer")
         except Exception as e:
             logging.error(f"parse_final_result: Final result pass error: {str(e)}")
             final_result = XGATaskResult(type="error", content="Parse final result failed!")
@@ -328,7 +340,7 @@
         return XGATaskLangFuse(self.session_id, self.task_id, self.task_run_id, self.task_no, self.agent_id)
 
 
-    def _logging_reponse_chunk(self, chunk):
+    def _logging_reponse_chunk(self, chunk, auto_count: int) -> None:
         chunk_type = chunk.get('type')
         prefix = ""
 
@@ -342,17 +354,20 @@
             tool_name = tool_execution.get('function_name')
             prefix = "-" + tool_name
 
-        logging.info(f"TASK_RESP_CHUNK[{chunk_type}{prefix}
+        logging.info(f"TASK_RESP_CHUNK[{auto_count}]<{chunk_type}{prefix}>: {chunk}")
 
 
 if __name__ == "__main__":
     import asyncio
     from xgae.utils.misc import read_file
+    from xgae.utils.setup_env import setup_logging
+
+    setup_logging()
 
     async def main():
         # Before Run Exec: uv run custom_fault_tools
         tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
-        system_prompt = read_file("templates/
+        system_prompt = read_file("templates/example/fault_user_prompt.txt")
         engine = XGATaskEngine(tool_box=tool_box,
                                general_tools=[],
                                custom_tools=["*"],
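Together with the responser change above, task_engine.py now treats a completion that calls no tool as a terminal 'non_tool_call' turn instead of auto-continuing, and _parse_final_result falls back to an explicit error result when nothing usable came back. The branching visible in these hunks, restated as a standalone helper purely for readability (not part of the package):

# Illustrative restatement of the auto-continue branches shown in the diff;
# only the finish_reason values that appear in these hunks are covered.
def should_auto_continue(finish_reason: str):
    if finish_reason in ("xml_tool_limit_reached", "non_tool_call"):
        return False  # stop the loop; 'non_tool_call' is the newly added case
    if finish_reason in ("stop", "length"):
        return True   # keep going ('length' is noted in the source as never occurring)
    return None       # other values are handled elsewhere in run_task_auto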
{xgae-0.1.9 → xgae-0.1.10}/src/xgae/engine/task_langfuse.py
@@ -2,20 +2,22 @@
 from typing import Any, Dict, Optional
 from langfuse import Langfuse
 
-from xgae.utils.setup_env import setup_langfuse
+from xgae.utils.setup_env import setup_langfuse
 from xgae.utils.llm_client import LangfuseMetadata
 from xgae.engine.engine_base import XGATaskResult
 
-setup_env_logging()
-langfuse:Langfuse = setup_langfuse()
-
 class XGATaskLangFuse:
+    langfuse: Langfuse = None
+
     def __init__(self,
                  session_id: str,
                  task_id:str,
                  task_run_id: str,
                  task_no: int,
                  agent_id: str) -> None:
+        if XGATaskLangFuse.langfuse is None:
+            XGATaskLangFuse.langfuse = setup_langfuse()
+
         self.session_id = session_id
         self.task_id = task_id
         self.task_run_id = task_run_id
@@ -35,9 +37,9 @@ class XGATaskLangFuse:
         trace = None
         if trace_id:
             self.trace_id = trace_id
-            trace = langfuse.trace(id=trace_id)
+            trace = XGATaskLangFuse.langfuse.trace(id=trace_id)
         else:
-            trace = langfuse.trace(name="xga_task_engine")
+            trace = XGATaskLangFuse.langfuse.trace(name="xga_task_engine")
         self.trace_id = trace.id
 
         metadata = {"task_id": self.task_id, "session_id": self.session_id, "agent_id": self.agent_id}
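task_langfuse.py drops the module-level setup_env_logging()/setup_langfuse() calls in favour of a Langfuse client held as a class attribute and created on first instantiation. The same lazy class-attribute pattern in isolation, with generic names that are not part of the package:

# Generic sketch of the lazy, class-level initialization XGATaskLangFuse now uses;
# make_client() stands in for setup_langfuse() and is not a real xgae function.
def make_client() -> object:
    return object()  # dummy client for the sketch

class LazyClientHolder:
    client = None                                    # shared by every instance

    def __init__(self):
        if LazyClientHolder.client is None:          # only the first instance pays the setup cost
            LazyClientHolder.client = make_client()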
{xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/llm_client.py
@@ -8,6 +8,7 @@ from typing import Union, Dict, Any, Optional, List, TypedDict
 from openai import OpenAIError
 from litellm.utils import ModelResponse, CustomStreamWrapper
 
+from xgae.utils import to_bool
 from xgae.utils.setup_env import setup_langfuse
 
 class LLMConfig(TypedDict, total=False):
@@ -46,6 +47,7 @@ class LLMClient:
     def __init__(self, llm_config: LLMConfig=None):
         litellm.modify_params = True
         litellm.drop_params = True
+
         self._init_langfuse()
 
         llm_config = llm_config or LLMConfig()
@@ -78,22 +80,27 @@
         self.is_stream = llm_config_params['stream']
 
         self.lite_llm_params = self._prepare_llm_params(llm_config_params)
-        logging.info(f"
+        logging.info(f"=== LLMClient initialed : model={self.model_name}, is_stream={self.is_stream}, enable thinking={self.lite_llm_params['enable_thinking']}")
 
     @staticmethod
     def _init_langfuse():
         if not LLMClient.langfuse_inited:
             LLMClient.langfuse_inited =True
-
-
-
-
-
-
+
+            env_llm_langfuse_enable = to_bool(os.getenv("LLM_LANGFUSE_ENABLE", False))
+            if env_llm_langfuse_enable:
+                env_langfuse = setup_langfuse()
+                if env_langfuse and env_langfuse.enabled:
+                    litellm.success_callback = ["langfuse"]
+                    litellm.failure_callback = ["langfuse"]
+                    LLMClient.langfuse_enabled = True
+                    logging.info("🛠️ LiteLLM Langfuse is enable !")
+                else:
+                    LLMClient.langfuse_enabled = False
+                    logging.warning("🛠️ LiteLLM Langfuse is disable, langfuse.enabled=false !")
             else:
                 LLMClient.langfuse_enabled = False
-                logging.warning("
-
+                logging.warning("🛠️ LiteLLM Langfuse is disable, LLM_LANGFUSE_ENABLE=False !")
 
     def _prepare_llm_params(self, llm_config_params: Dict[str, Any]) -> Dict[str, Any]:
         prepared_llm_params = llm_config_params.copy()
@@ -244,14 +251,16 @@ if __name__ == "__main__":
     setup_logging()
     langfuse = setup_langfuse()
 
-    async def
+    async def main():
         llm_client = LLMClient(LLMConfig(stream=False))
 
         messages = [{"role": "user", "content": "1+1="}]
         trace_id = langfuse.trace(name = "xgae_litellm_test").trace_id
+        await asyncio.sleep(1)
+
         meta = LangfuseMetadata(
             generation_name="llm_completion_test",
-            generation_id="
+            generation_id="generation_id_0",
             existing_trace_id=trace_id,
             session_id="session_0",
         )
@@ -269,7 +278,6 @@ if __name__ == "__main__":
         else:
             print(response.choices[0].message.content)
 
-
-    asyncio.run(llm_completion())
+    asyncio.run(main())
 
 
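llm_client.py now gates the LiteLLM-to-Langfuse callbacks behind the LLM_LANGFUSE_ENABLE flag added to .env, and only registers them when setup_langfuse() actually produced an enabled client. The gating pulled out as a standalone sketch; to_bool here is a local stand-in for xgae.utils.to_bool, while the litellm callback assignments are the ones the diff itself uses:

# Sketch of the new callback gating in LLMClient._init_langfuse().
import os
import litellm

def to_bool(value) -> bool:
    # Local stand-in for xgae.utils.to_bool.
    return str(value).strip().lower() in ("1", "true", "yes", "on")

def enable_litellm_langfuse_if_configured(langfuse_client) -> bool:
    if not to_bool(os.getenv("LLM_LANGFUSE_ENABLE", "False")):
        return False                          # feature flag off: leave callbacks unset
    if not (langfuse_client and langfuse_client.enabled):
        return False                          # no usable Langfuse client
    litellm.success_callback = ["langfuse"]
    litellm.failure_callback = ["langfuse"]
    return True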
{xgae-0.1.9 → xgae-0.1.10}/src/xgae/utils/setup_env.py
@@ -52,7 +52,7 @@ def setup_logging(log_file: str=None, log_level: str="INFO") :
 
     logger.setLevel(logging_level)
 
-    logging.info(f"
+    logging.info(f"🛠️ XGA_LOGGING is initialized, log_level={log_level}, log_file={log_file}")
 
 def setup_env_logging():
     log_enable = to_bool(os.getenv("LOG_ENABLE", True))
@@ -71,10 +71,10 @@ def setup_langfuse() -> Langfuse:
                              secret_key=env_secret_key,
                              host=env_host)
 
-        logging.info("
+        logging.info("🛠️ XGA_LANGFUSE initialized Successfully by Key !")
     else:
         _langfuse = Langfuse(enabled=False)
-        logging.warning("
+        logging.warning("🛠️ XGA_LANGFUSE Not set key, Langfuse is disabled!")
 
     return _langfuse
 
xgae-0.1.9/src/xgae/__init__.py
DELETED
File without changes
/xgae-0.1.9/templates/example_user_prompt.txt → /xgae-0.1.10/templates/example/fault_user_prompt.txt
RENAMED
File without changes