xgae 0.1.4-py3-none-any.whl → 0.1.6-py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of xgae might be problematic.
- xgae/engine/{xga_base.py → engine_base.py} +12 -15
- xgae/engine/{xga_mcp_tool_box.py → mcp_tool_box.py} +6 -9
- xgae/engine/{xga_prompt_builder.py → prompt_builder.py} +3 -2
- xgae/engine/responser/non_stream_responser.py +110 -0
- xgae/engine/responser/{xga_responser_base.py → responser_base.py} +103 -228
- xgae/engine/responser/{xga_stream_responser.py → stream_responser.py} +94 -55
- xgae/engine/task_engine.py +360 -0
- xgae/utils/__init__.py +13 -0
- xgae/utils/{utils.py → misc.py} +0 -8
- xgae/utils/setup_env.py +51 -66
- xgae/utils/xml_tool_parser.py +4 -7
- {xgae-0.1.4.dist-info → xgae-0.1.6.dist-info}/METADATA +1 -1
- xgae-0.1.6.dist-info/RECORD +17 -0
- xgae/engine/responser/xga_non_stream_responser.py +0 -213
- xgae/engine/xga_engine.py +0 -278
- xgae-0.1.4.dist-info/RECORD +0 -16
- {xgae-0.1.4.dist-info → xgae-0.1.6.dist-info}/WHEEL +0 -0
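
Most of this release is module renames that drop the `xga_` prefix, which changes downstream import paths. Below is a minimal sketch of the 0.1.6 imports, inferred from the file list above and the names visible in the expanded diffs further down; treat it as illustrative, not an official migration guide.

```python
# Illustrative 0.1.6 import paths; the comments name the 0.1.4 files they map to
# (per the file list above).
from xgae.engine.task_engine import XGATaskEngine        # replaces xgae/engine/xga_engine.py
from xgae.engine.mcp_tool_box import XGAMcpToolBox       # was xgae/engine/xga_mcp_tool_box.py
from xgae.engine.prompt_builder import XGAPromptBuilder  # was xgae/engine/xga_prompt_builder.py
from xgae.utils.misc import read_file                    # was xgae/utils/utils.py
from xgae.utils import handle_error, langfuse            # handle_error moved out of utils.py; langfuse is created in __init__.py
```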
xgae/engine/task_engine.py
ADDED

@@ -0,0 +1,360 @@
+
+import logging
+import json
+
+from typing import List, Any, Dict, Optional, AsyncGenerator, Union, Literal
+from uuid import uuid4
+
+from xgae.engine.responser.responser_base import TaskResponserContext, TaskResponseProcessor, TaskRunContinuousState
+from xgae.engine.engine_base import XGAResponseMsgType, XGAResponseMessage, XGAToolBox, XGATaskResult
+
+from xgae.utils import langfuse, handle_error
+from xgae.utils.llm_client import LLMClient, LLMConfig
+
+from xgae.utils.json_helpers import format_for_yield
+from prompt_builder import XGAPromptBuilder
+from mcp_tool_box import XGAMcpToolBox
+
+class XGATaskEngine:
+    def __init__(self,
+                 session_id: Optional[str] = None,
+                 task_id: Optional[str] = None,
+                 agent_id: Optional[str] = None,
+                 trace_id: Optional[str] = None,
+                 system_prompt: Optional[str] = None,
+                 llm_config: Optional[LLMConfig] = None,
+                 prompt_builder: Optional[XGAPromptBuilder] = None,
+                 tool_box: Optional[XGAToolBox] = None):
+        self.task_id = task_id if task_id else f"xga_task_{uuid4()}"
+        self.agent_id = agent_id
+        self.session_id = session_id
+
+        self.llm_client = LLMClient(llm_config)
+        self.model_name = self.llm_client.model_name
+        self.is_stream = self.llm_client.is_stream
+
+        self.prompt_builder = prompt_builder or XGAPromptBuilder(system_prompt)
+        self.tool_box = tool_box or XGAMcpToolBox()
+
+        self.task_response_msgs: List[XGAResponseMessage] = []
+        self.task_no = -1
+        self.task_run_id = f"{self.task_id}[{self.task_no}]"
+        self.trace_id: str = trace_id or langfuse.create_trace_id()
+
+    async def _post_init_(self, general_tools: List[str], custom_tools: List[str]) -> None:
+        await self.tool_box.load_mcp_tools_schema()
+        await self.tool_box.creat_task_tool_box(self.task_id, general_tools, custom_tools)
+        general_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "general_tool")
+        custom_tool_schemas = self.tool_box.get_task_tool_schemas(self.task_id, "custom_tool")
+
+        self.task_prompt = self.prompt_builder.build_task_prompt(self.model_name, general_tool_schemas, custom_tool_schemas)
+
+    @classmethod
+    async def create(cls,
+                     session_id: Optional[str] = None,
+                     task_id: Optional[str] = None,
+                     agent_id: Optional[str] = None,
+                     trace_id: Optional[str] = None,
+                     system_prompt: Optional[str] = None,
+                     general_tools: Optional[List[str]] = None,
+                     custom_tools: Optional[List[str]] = None,
+                     llm_config: Optional[LLMConfig] = None,
+                     prompt_builder: Optional[XGAPromptBuilder] = None,
+                     tool_box: Optional[XGAToolBox] = None) -> 'XGATaskEngine':
+        engine: XGATaskEngine = cls(session_id=session_id,
+                                    task_id=task_id,
+                                    agent_id=agent_id,
+                                    trace_id=trace_id,
+                                    system_prompt=system_prompt,
+                                    llm_config=llm_config,
+                                    prompt_builder=prompt_builder,
+                                    tool_box=tool_box)
+
+        general_tools = general_tools or ["complete", "ask"]
+        if "*" not in general_tools:
+            if "complete" not in general_tools:
+                general_tools.append("complete")
+            elif "ask" not in general_tools:
+                general_tools.append("ask")
+
+        custom_tools = custom_tools or []
+        await engine._post_init_(general_tools, custom_tools)
+
+        logging.info("*"*30 + f" XGATaskEngine Task'{engine.task_id}' Initialized " + "*"*30)
+        logging.info(f"model_name={engine.model_name}, is_stream={engine.is_stream}, trace_id={engine.trace_id}")
+        logging.info(f"general_tools={general_tools}, custom_tools={custom_tools}")
+
+        return engine
+
+    async def run_task_with_final_answer(self,
+                                         task_message: Dict[str, Any],
+                                         max_auto_run: int = 25,
+                                         trace_id: Optional[str] = None) -> XGATaskResult:
+        chunks = []
+        async for chunk in self.run_task(task_message=task_message, max_auto_run=max_auto_run, trace_id=trace_id):
+            chunks.append(chunk)
+        if len(chunks) > 0:
+            final_result = self._parse_final_result(chunks)
+        else:
+            final_result = XGATaskResult(type="error", content="LLM Answer is Empty")
+        return final_result
+
+    async def run_task(self,
+                       task_message: Dict[str, Any],
+                       max_auto_run: int = 25,
+                       trace_id: Optional[str] = None) -> AsyncGenerator[Dict[str, Any], None]:
+        try:
+            self.trace_id = trace_id or self.trace_id or langfuse.create_trace_id()
+
+            self.task_no += 1
+            self.task_run_id = f"{self.task_id}[{self.task_no}]"
+
+            self.add_response_message(type="user", content=task_message, is_llm_message=True)
+
+            continuous_state: TaskRunContinuousState = {
+                "accumulated_content": "",
+                "auto_continue_count": 0,
+                "auto_continue": False if max_auto_run <= 1 else True,
+                "max_auto_run": max_auto_run
+            }
+            async for chunk in self._run_task_auto(continuous_state):
+                yield chunk
+        finally:
+            await self.tool_box.destroy_task_tool_box(self.task_id)
+
+    async def _run_task_once(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
+        llm_messages = [{"role": "system", "content": self.task_prompt}]
+        cxt_llm_contents = self.get_history_llm_messages()
+        llm_messages.extend(cxt_llm_contents)
+
+        partial_content = continuous_state.get('accumulated_content', '')
+        if partial_content:
+            temp_assistant_message = {
+                "role": "assistant",
+                "content": partial_content
+            }
+            llm_messages.append(temp_assistant_message)
+
+        llm_response = await self.llm_client.create_completion(llm_messages)
+        response_processor = self._create_response_processer()
+
+        async for chunk in response_processor.process_response(llm_response, llm_messages, continuous_state):
+            self._logging_reponse_chunk(chunk)
+            yield chunk
+
+    async def _run_task_auto(self, continuous_state: TaskRunContinuousState) -> AsyncGenerator[Dict[str, Any], None]:
+        max_auto_run = continuous_state['max_auto_run']
+        max_auto_run = max_auto_run if max_auto_run > 0 else 1
+
+        def update_continuous_state(_auto_continue_count, _auto_continue):
+            continuous_state["auto_continue_count"] = _auto_continue_count
+            continuous_state["auto_continue"] = _auto_continue
+
+        auto_continue_count = 0
+        auto_continue = True
+        while auto_continue and auto_continue_count < max_auto_run:
+            auto_continue = False
+
+            try:
+                async for chunk in self._run_task_once(continuous_state):
+                    yield chunk
+                    try:
+                        if chunk.get("type") == "status":
+                            content = json.loads(chunk.get('content', '{}'))
+                            status_type = content.get('status_type', None)
+                            if status_type == "error":
+                                logging.error(f"run_task_auto: task_response error: {chunk.get('message', 'Unknown error')}")
+                                auto_continue = False
+                                break
+                            elif status_type == 'finish':
+                                finish_reason = content.get('finish_reason', None)
+                                if finish_reason == 'completed':
+                                    logging.info(f"run_task_auto: Detected finish_reason='completed', TASK_COMPLETE Success !")
+                                    auto_continue = False
+                                    break
+                                elif finish_reason == 'xml_tool_limit_reached':
+                                    logging.warning(f"run_task_auto: Detected finish_reason='xml_tool_limit_reached', stop auto-continue")
+                                    auto_continue = False
+                                    break
+                                elif finish_reason == 'stop' or finish_reason == 'length':  # 'length' never occur
+                                    auto_continue = True
+                                    auto_continue_count += 1
+                                    update_continuous_state(auto_continue_count, auto_continue)
+                                    logging.info(f"run_task_auto: Detected finish_reason='{finish_reason}', auto-continuing ({auto_continue_count}/{max_auto_run})")
+                    except Exception as parse_error:
+                        logging.error(f"run_task_auto: Error in parse chunk: {str(parse_error)}")
+                        content = {"role": "system", "status_type": "error", "message": "Parse response chunk Error"}
+                        handle_error(parse_error)
+                        error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
+                        yield format_for_yield(error_msg)
+            except Exception as run_error:
+                logging.error(f"run_task_auto: Call task_run_once error: {str(run_error)}")
+                content = {"role": "system", "status_type": "error", "message": "Call task_run_once error"}
+                handle_error(run_error)
+                error_msg = self.add_response_message(type="status", content=content, is_llm_message=False)
+                yield format_for_yield(error_msg)
+
+    def _parse_final_result(self, chunks: List[Dict[str, Any]]) -> XGATaskResult:
+        final_result: XGATaskResult = None
+        try:
+            finish_reason = ''
+            for chunk in reversed(chunks):
+                chunk_type = chunk.get("type")
+                if chunk_type == "status":
+                    status_content = json.loads(chunk.get('content', '{}'))
+                    status_type = status_content.get('status_type', None)
+                    if status_type == "error":
+                        error = status_content.get('message', 'Unknown error')
+                        final_result = XGATaskResult(type="error", content=error)
+                    elif status_type == "finish":
+                        finish_reason = status_content.get('finish_reason', None)
+                        if finish_reason == 'xml_tool_limit_reached':
+                            error = "Completed due to over task max_auto_run limit !"
+                            final_result = XGATaskResult(type="error", content=error)
+                elif chunk_type == "tool" and finish_reason in ['completed', 'stop']:
+                    tool_content = json.loads(chunk.get('content', '{}'))
+                    tool_execution = tool_content.get('tool_execution')
+                    tool_name = tool_execution.get('function_name')
+                    if tool_name == "complete":
+                        result_content = tool_execution["arguments"].get("text", "Task completed with no answer")
+                        attachments = tool_execution["arguments"].get("attachments", None)
+                        final_result = XGATaskResult(type="answer", content=result_content, attachments=attachments)
+                    elif tool_name == "ask":
+                        result_content = tool_execution["arguments"].get("text", "Task ask for more info")
+                        attachments = tool_execution["arguments"].get("attachments", None)
+                        final_result = XGATaskResult(type="ask", content=result_content, attachments=attachments)
+                    else:
+                        tool_result = tool_execution.get("result", None)
+                        if tool_result is not None:
+                            success = tool_result.get("success")
+                            output = tool_result.get("output")
+                            result_type = "answer" if success else "error"
+                            result_content = f"Task execute '{tool_name}' {result_type}: {output}"
+                            final_result = XGATaskResult(type=result_type, content=result_content)
+                elif chunk_type == "assistant" and finish_reason == 'stop':
+                    assis_content = chunk.get('content', {})
+                    result_content = assis_content.get("content", "LLM output is empty")
+                    final_result = XGATaskResult(type="answer", content=result_content)
+
+                if final_result is not None:
+                    break
+        except Exception as e:
+            logging.error(f"parse_final_result: Final result pass error: {str(e)}")
+            final_result = XGATaskResult(type="error", content="Parse final result failed!")
+            handle_error(e)
+
+        return final_result
+
+    def add_response_message(self, type: XGAResponseMsgType,
+                             content: Union[Dict[str, Any], List[Any], str],
+                             is_llm_message: bool,
+                             metadata: Optional[Dict[str, Any]] = None) -> XGAResponseMessage:
+        metadata = metadata or {}
+        metadata["task_id"] = self.task_id
+        metadata["task_run_id"] = self.task_run_id
+        metadata["trace_id"] = self.trace_id
+        metadata["session_id"] = self.session_id
+        metadata["agent_id"] = self.agent_id
+
+        message = XGAResponseMessage(
+            message_id=f"xga_msg_{uuid4()}",
+            type=type,
+            is_llm_message=is_llm_message,
+            content=content,
+            metadata=metadata
+        )
+        self.task_response_msgs.append(message)
+
+        return message
+
+    def get_history_llm_messages(self) -> List[Dict[str, Any]]:
+        llm_messages = []
+        for message in self.task_response_msgs:
+            if message["is_llm_message"]:
+                llm_messages.append(message)
+
+        response_llm_contents = []
+        for llm_message in llm_messages:
+            content = llm_message["content"]
+            # @todo content List type
+            if isinstance(content, str):
+                try:
+                    _content = json.loads(content)
+                    response_llm_contents.append(_content)
+                except json.JSONDecodeError as e:
+                    logging.error(f"get_context_llm_contents: Failed to decode json, content=:{content}")
+                    handle_error(e)
+            else:
+                response_llm_contents.append(content)
+
+        return response_llm_contents
+
+    def _create_response_processer(self) -> TaskResponseProcessor:
+        response_context = self._create_response_context()
+        is_stream = response_context.get("is_stream", False)
+        if is_stream:
+            from xgae.engine.responser.stream_responser import StreamTaskResponser
+            return StreamTaskResponser(response_context)
+        else:
+            from xgae.engine.responser.non_stream_responser import NonStreamTaskResponser
+            return NonStreamTaskResponser(response_context)
+
+    def _create_response_context(self) -> TaskResponserContext:
+        response_context: TaskResponserContext = {
+            "is_stream": self.is_stream,
+            "task_id": self.task_id,
+            "task_run_id": self.task_run_id,
+            "trace_id": self.trace_id,
+            "model_name": self.model_name,
+            "max_xml_tool_calls": 0,
+            "add_response_msg_func": self.add_response_message,
+            "tool_box": self.tool_box,
+            "tool_execution_strategy": "sequential",  # "parallel"
+            "xml_adding_strategy": "user_message",
+        }
+        return response_context
+
+
+    def _logging_reponse_chunk(self, chunk):
+        chunk_type = chunk.get('type')
+        prefix = ""
+
+        if chunk_type == 'status':
+            content = json.loads(chunk.get('content', '{}'))
+            status_type = content.get('status_type', "empty")
+            prefix = "-" + status_type
+        elif chunk_type == 'tool':
+            tool_content = json.loads(chunk.get('content', '{}'))
+            tool_execution = tool_content.get('tool_execution')
+            tool_name = tool_execution.get('function_name')
+            prefix = "-" + tool_name
+
+        logging.info(f"TASK_RESP_CHUNK[{chunk_type}{prefix}]: {chunk}")
+
+
+if __name__ == "__main__":
+    import asyncio
+    from xgae.utils.misc import read_file
+
+    async def main():
+        tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+        system_prompt = read_file("templates/scp_test_prompt.txt")
+        engine = await XGATaskEngine.create(tool_box=tool_box,
+                                            general_tools=[],
+                                            custom_tools=["bomc_fault.*"],
+                                            llm_config=LLMConfig(stream=False),
+                                            system_prompt=system_prompt)
+
+        final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "定位10.0.1.1故障"}, max_auto_run=8)
+        print("FINAL RESULT:", final_result)
+
+        # ==== test streaming response ========
+        # chunks = []
+        # async for chunk in engine.run_task(task_message={"role": "user", "content": "定位10.0.0.1的故障"}, max_auto_run=8):
+        #     print(chunk)
+
+        # ==== test no tool call ========
+        # engine = await XGATaskEngine.create(llm_config=LLMConfig(stream=False))
+        # final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": "1+1"}, max_auto_run=2)
+        # print("FINAL RESULT:", final_result)
+    asyncio.run(main())
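
A minimal usage sketch of the new engine, assuming only the entry points exercised by the `__main__` block above (`XGATaskEngine.create`, `run_task_with_final_answer`, and the streaming `run_task` generator); `LLMConfig(stream=True)` mirrors the `stream=False` call shown there and is an assumption.

```python
import asyncio

from xgae.engine.task_engine import XGATaskEngine
from xgae.utils.llm_client import LLMConfig


async def demo() -> None:
    # Non-streaming: chunks are collected internally and reduced to one XGATaskResult.
    engine = await XGATaskEngine.create(llm_config=LLMConfig(stream=False))
    result = await engine.run_task_with_final_answer(
        task_message={"role": "user", "content": "1+1"}, max_auto_run=2)
    print("FINAL RESULT:", result)

    # Streaming: run_task is an async generator yielding response chunks
    # ("assistant", "tool", and "status" messages).
    engine = await XGATaskEngine.create(llm_config=LLMConfig(stream=True))  # stream=True is assumed
    async for chunk in engine.run_task(
            task_message={"role": "user", "content": "1+1"}, max_auto_run=2):
        print(chunk)


asyncio.run(demo())
```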
xgae/utils/__init__.py
ADDED

@@ -0,0 +1,13 @@
+import logging
+
+from .setup_env import setup_langfuse, setup_logging
+
+setup_logging()
+langfuse = setup_langfuse()
+
+def handle_error(e: Exception) -> None:
+    import traceback
+
+    logging.error("An error occurred: %s", str(e))
+    logging.error("Traceback details:\n%s", traceback.format_exc())
+    raise (e) from e
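
The tracing client and the error helper now live at package level, so other modules import them directly (task_engine.py above does exactly this). A small illustrative sketch; note that `handle_error` logs the traceback and then re-raises.

```python
import json
import logging

from xgae.utils import handle_error, langfuse

trace_id = langfuse.create_trace_id()  # works even when Langfuse keys are unset (tracing disabled)
logging.info("trace_id=%s", trace_id)

try:
    json.loads("not valid json")
except json.JSONDecodeError as e:
    handle_error(e)  # logs the message and traceback, then re-raises the exception
```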
xgae/utils/{utils.py → misc.py}
RENAMED

@@ -1,17 +1,9 @@
 import logging
 import os
 import sys
-import datetime
 
 from typing import Any, Dict
 
-def handle_error(e: Exception) -> None:
-    import traceback
-
-    logging.error("An error occurred: %s", str(e))
-    logging.error("Traceback details:\n%s", traceback.format_exc())
-    raise (e) from e
-
 def read_file(file_path: str) -> str:
     if not os.path.exists(file_path):
         logging.error(f"File '{file_path}' not found")
xgae/utils/setup_env.py
CHANGED

@@ -3,91 +3,76 @@ import os
 
 from langfuse import Langfuse
 
-_log_initialized = False
-
 def setup_logging() -> None:
-
-
-
-    from dotenv import load_dotenv
-    load_dotenv()
-
-    env_log_level = os.getenv("LOG_LEVEL", "INFO")
-    env_log_file = os.getenv("LOG_FILE", "log/xga.log")
-    log_level = getattr(logging, env_log_level.upper(), logging.INFO)
-
-    log_dir = os.path.dirname(env_log_file)
-    if log_dir and not os.path.exists(log_dir):
-        os.makedirs(log_dir, exist_ok=True)
-    else:
-        os.remove(env_log_file)
-
-    logger = logging.getLogger()
-    for handler in logger.handlers[:]:
-        logger.removeHandler(handler)
+    import colorlog
+    from dotenv import load_dotenv
+    load_dotenv()
 
+    env_log_level = os.getenv("LOG_LEVEL", "INFO")
+    env_log_file = os.getenv("LOG_FILE", "log/xga.log")
+    log_level = getattr(logging, env_log_level.upper(), logging.INFO)
 
+    log_dir = os.path.dirname(env_log_file)
+    if log_dir and not os.path.exists(log_dir):
+        os.makedirs(log_dir, exist_ok=True)
+    else:
+        os.remove(env_log_file)
 
-
-
-
-        'WARNING': 'yellow',
-        'ERROR': 'red',
-        'CRITICAL': 'red,bg_white'
-    }
+    logger = logging.getLogger()
+    for handler in logger.handlers[:]:
+        logger.removeHandler(handler)
 
-
-
-
-
+    log_colors = {
+        'DEBUG': 'cyan',
+        'INFO': 'green',
+        'WARNING': 'yellow',
+        'ERROR': 'red',
+        'CRITICAL': 'red,bg_white'
+    }
 
-
-
-
-
+    console_formatter = colorlog.ColoredFormatter('%(log_color)s%(asctime)s - %(levelname)-8s%(reset)s %(white)s%(message)s',
+                                                  log_colors=log_colors,
+                                                  datefmt='%Y-%m-%d %H:%M:%S'
+                                                  )
 
-
-
+    file_formatter = logging.Formatter(
+        '%(asctime)s -%(levelname)-8s %(message)s',
+        datefmt='%Y-%m-%d %H:%M:%S'
+    )
 
-
-
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter(console_formatter)
 
-
-
+    file_handler = logging.FileHandler(env_log_file, encoding='utf-8')
+    file_handler.setFormatter(file_formatter)
 
-
+    logger.addHandler(console_handler)
+    logger.addHandler(file_handler)
 
-
+    logger.setLevel(log_level)
 
-
+    logging.info(f"Logger is initialized, log_level={env_log_level}, log_file={env_log_file}")
 
-setup_logging()
-
-_langfuse_initialized = False
 
 def setup_langfuse() -> Langfuse:
-    global _langfuse_initialized
     _langfuse = None
-
-
-
-
-
-
-
-
-
-
-
-
-    logging.warning("Not set key, Langfuse is disabled!")
-
-    _langfuse_initialized = True
-    return _langfuse
+    env_public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
+    env_secret_key = os.getenv("LANGFUSE_SECRET_KEY")
+    env_host = os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
+    if env_public_key and env_secret_key:
+        _langfuse = Langfuse(tracing_enabled=True,
+                             public_key=env_public_key,
+                             secret_key=env_secret_key,
+                             host=env_host)
+        logging.info("Langfuse initialized Successfully by Key !")
+    else:
+        _langfuse = Langfuse(tracing_enabled=False)
+        logging.warning("Not set key, Langfuse is disabled!")
 
-
+    return _langfuse
 
 
 if __name__ == "__main__":
+    from xgae.utils import langfuse
     trace_id = langfuse.create_trace_id()
     logging.warning(f"trace_id={trace_id}")
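
Logging and Langfuse setup are now driven entirely by environment variables (loaded via python-dotenv) and run once when `xgae.utils` is imported. A sketch of the variables read above, with illustrative values:

```python
# Illustrative .env picked up by load_dotenv() inside setup_logging():
#
#   LOG_LEVEL=DEBUG                            # default: INFO
#   LOG_FILE=log/xga.log                       # default: log/xga.log (directory created if missing)
#   LANGFUSE_PUBLIC_KEY=pk-lf-...              # leave unset to disable Langfuse tracing
#   LANGFUSE_SECRET_KEY=sk-lf-...
#   LANGFUSE_HOST=https://cloud.langfuse.com   # default host
#
# Importing xgae.utils triggers setup_logging() and setup_langfuse() as a side effect.
import logging

import xgae.utils  # configures colorized console logging plus the file handler

logging.info("logging configured from LOG_LEVEL / LOG_FILE")
```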
xgae/utils/xml_tool_parser.py
CHANGED

@@ -5,14 +5,11 @@ This module provides a reliable XML tool call parsing system that supports
 the XML format with structured function_calls blocks.
 """
 
-import re
-import xml.etree.ElementTree as ET
-from typing import List, Dict, Any, Optional, Tuple
-from dataclasses import dataclass
 import json
 import logging
-
-
+import re
+from dataclasses import dataclass
+from typing import List, Dict, Any, Optional, Tuple
 
 
 @dataclass
@@ -85,7 +82,7 @@ class XMLToolParser:
                 if tool_call:
                     tool_calls.append(tool_call)
             except Exception as e:
-
+                logging.error(f"Error parsing invoke block for {function_name}: {e}")
 
         return tool_calls
 
xgae-0.1.6.dist-info/RECORD
ADDED

@@ -0,0 +1,17 @@
+xgae/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+xgae/engine/engine_base.py,sha256=ySERuLy1YWsf-3s0NFKcyTnXQ4g69wR-cQhtnG0OFmU,1747
+xgae/engine/mcp_tool_box.py,sha256=6mdvu9-aquyLJEwebTtpa_bfGmgT1jPszKE90NIpR5c,9852
+xgae/engine/prompt_builder.py,sha256=ygFAIc4p3opIMyl6g1JeBuSiMjNVxwRloKeF2eX8R5I,4354
+xgae/engine/task_engine.py,sha256=xxAWtPfKgSpf6L7wOc243U-7YP8AC2WYoCI-FUdDpOc,18132
+xgae/engine/responser/non_stream_responser.py,sha256=QEFE4JGYVaIbFeMUMJa1Mt1uBblU_hAOywAhyp9V1k4,6634
+xgae/engine/responser/responser_base.py,sha256=aHKJ880B1ezfBWzyHoOSNVDb-CJY4ujH2MGm61aJLy8,31468
+xgae/engine/responser/stream_responser.py,sha256=5KzCHApiPplZ-zN_sbbEbSvj2rtvKWBshJKe_-x7RDI,52927
+xgae/utils/__init__.py,sha256=jChvD-p_p5gsrCZUVYPUGJs4CS9gIdNFcSOpkRpcM4Y,317
+xgae/utils/json_helpers.py,sha256=K1ja6GJCatrAheW9bEWAYSQbDI42__boBCZgtsv1gtk,4865
+xgae/utils/llm_client.py,sha256=mgzn8heUyRm92HTLEYGdfsGEpFtD-xLFr39P98_JP0s,12402
+xgae/utils/misc.py,sha256=EK94YesZp8AmRUqWfN-CjTxyEHPWdIIWpFNO17dzm9g,915
+xgae/utils/setup_env.py,sha256=Nc0HCQOnK-EGNLTWCQ9-iYysNRdIvwGhcHdqpNeV910,2407
+xgae/utils/xml_tool_parser.py,sha256=EJ6BjpD4CSdmS_LqViUJ6P8H9GY2R1e4Dh8rLCR6nSE,7474
+xgae-0.1.6.dist-info/METADATA,sha256=Q5OiPe5W3H7ym2TDPaM1x3k6jSTIol3QDyWI0dsQetw,309
+xgae-0.1.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+xgae-0.1.6.dist-info/RECORD,,