xgae 0.1.17__tar.gz → 0.1.19__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of xgae might be problematic.

Files changed (49)
  1. xgae-0.1.17/release.md → xgae-0.1.19/CHANGELOG.md +11 -0
  2. {xgae-0.1.17 → xgae-0.1.19}/PKG-INFO +2 -1
  3. {xgae-0.1.17 → xgae-0.1.19}/mcpservers/custom_servers.json +4 -0
  4. {xgae-0.1.17 → xgae-0.1.19}/pyproject.toml +4 -2
  5. xgae-0.1.19/src/examples/agent/langgraph/react/final_result_agent.py +59 -0
  6. xgae-0.1.19/src/examples/agent/langgraph/react/react_agent.py +327 -0
  7. xgae-0.1.19/src/examples/agent/langgraph/react/run_react_agent.py +62 -0
  8. xgae-0.1.19/src/examples/engine/run_custom_and_agent_tools.py +45 -0
  9. {xgae-0.1.17 → xgae-0.1.19}/src/examples/engine/run_general_tools.py +1 -1
  10. {xgae-0.1.17 → xgae-0.1.19}/src/examples/engine/run_human_in_loop.py +2 -2
  11. {xgae-0.1.17 → xgae-0.1.19}/src/examples/engine/run_simple.py +1 -1
  12. {xgae-0.1.17 → xgae-0.1.19}/src/examples/tools/custom_fault_tools_app.py +14 -15
  13. xgae-0.1.19/src/examples/tools/simu_a2a_tools_app.py +56 -0
  14. xgae-0.1.19/src/xgae/__init__.py +4 -0
  15. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/engine_base.py +4 -1
  16. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/mcp_tool_box.py +32 -19
  17. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/prompt_builder.py +17 -2
  18. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/responser/non_stream_responser.py +1 -1
  19. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/responser/responser_base.py +25 -32
  20. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/responser/stream_responser.py +10 -16
  21. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/task_engine.py +44 -30
  22. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/engine/task_langfuse.py +19 -7
  23. xgae-0.1.17/src/xgae/cli_app.py → xgae-0.1.19/src/xgae/engine_cli_app.py +16 -19
  24. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/tools/without_general_tools_app.py +3 -3
  25. xgae-0.1.19/templates/agent_tool_prompt_template.txt +29 -0
  26. {xgae-0.1.17 → xgae-0.1.19}/templates/example/fault_user_prompt.txt +1 -1
  27. xgae-0.1.19/templates/example/final_result_template.txt +19 -0
  28. {xgae-0.1.17 → xgae-0.1.19}/uv.lock +78 -1
  29. xgae-0.1.17/src/examples/agent/langgraph/react/react_agent.py +0 -205
  30. xgae-0.1.17/src/examples/engine/run_user_prompt.py +0 -30
  31. xgae-0.1.17/src/xgae/__init__.py +0 -4
  32. {xgae-0.1.17 → xgae-0.1.19}/.env +0 -0
  33. {xgae-0.1.17 → xgae-0.1.19}/.python-version +0 -0
  34. {xgae-0.1.17 → xgae-0.1.19}/README.md +0 -0
  35. {xgae-0.1.17 → xgae-0.1.19}/mcpservers/xga_server.json +0 -0
  36. {xgae-0.1.17 → xgae-0.1.19}/mcpservers/xga_server_sse.json +0 -0
  37. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/utils/__init__.py +0 -0
  38. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/utils/json_helpers.py +0 -0
  39. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/utils/llm_client.py +0 -0
  40. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/utils/misc.py +0 -0
  41. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/utils/setup_env.py +0 -0
  42. {xgae-0.1.17 → xgae-0.1.19}/src/xgae/utils/xml_tool_parser.py +0 -0
  43. {xgae-0.1.17 → xgae-0.1.19}/templates/custom_tool_prompt_template.txt +0 -0
  44. {xgae-0.1.17 → xgae-0.1.19}/templates/gemini_system_prompt_template.txt +0 -0
  45. {xgae-0.1.17 → xgae-0.1.19}/templates/general_tool_prompt_template.txt +0 -0
  46. {xgae-0.1.17 → xgae-0.1.19}/templates/system_prompt_response_sample.txt +0 -0
  47. {xgae-0.1.17 → xgae-0.1.19}/templates/system_prompt_template.txt +0 -0
  48. {xgae-0.1.17 → xgae-0.1.19}/test/test_langfuse.py +0 -0
  49. {xgae-0.1.17 → xgae-0.1.19}/test/test_litellm_langfuse.py +0 -0
@@ -1,4 +1,15 @@
  # Release Changelog
+ ## [0.1.19] - 2025-9-8
+ ### Added
+ - Example: Langgraph react agent release V1, full logic but no final result agent and tool select agent
+
+
+ # Release Changelog
+ ## [0.1.18] - 2025-9-3
+ ### Added
+ - Support Agent tools
+
+
  ## [0.1.17] - 2025-9-1
  ### Target
  - Saved for XGATaskEngine base version
@@ -1,10 +1,11 @@
  Metadata-Version: 2.4
  Name: xgae
- Version: 0.1.17
+ Version: 0.1.19
  Summary: Extreme General Agent Engine
  Requires-Python: >=3.13
  Requires-Dist: colorlog==6.9.0
  Requires-Dist: langchain-mcp-adapters==0.1.9
+ Requires-Dist: langchain==0.3.27
  Requires-Dist: langfuse==2.60.9
  Requires-Dist: langgraph==0.6.5
  Requires-Dist: litellm==1.74.15
@@ -3,6 +3,10 @@
      "bomc_fault": {
        "url": "http://localhost:17070/sse",
        "transport": "sse"
+     },
+     "_@_equip_fault_cause": {
+       "url": "http://localhost:21010/sse",
+       "transport": "sse"
      }
    }
  }
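The new "_@_equip_fault_cause" SSE server is picked up through the same config file the examples already load. A minimal sketch of how this release wires that file into the engine, mirroring run_custom_and_agent_tools.py further down (the user input is illustrative):

import asyncio

from xgae.engine.mcp_tool_box import XGAMcpToolBox
from xgae.engine.task_engine import XGATaskEngine

async def main() -> None:
    # Both "bomc_fault" and the new "_@_equip_fault_cause" servers are read from this file.
    tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
    engine = XGATaskEngine(tool_box=tool_box, general_tools=[], custom_tools=["*"])
    final_result = await engine.run_task_with_final_answer(
        task_input={"role": "user", "content": "locate 10.2.3.4 fault and solution"}
    )
    print("FINAL RESULT:", final_result)

asyncio.run(main())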
@@ -1,6 +1,6 @@
  [project]
  name = "xgae"
- version = "0.1.17"
+ version = "0.1.19"
  description = "Extreme General Agent Engine"
  readme = "README.md"
  requires-python = ">=3.13"
@@ -11,6 +11,7 @@ dependencies = [
      "litellm==1.74.15",
      "mcp==1.13.0",
      "langfuse==2.60.9",
+     "langchain==0.3.27",
  ]

  [build-system]
@@ -21,6 +22,7 @@ build-backend = "hatchling.build"
  exclude = ["log/*", ".idea/*"]

  [project.scripts]
- xgae = "xgae.cli_app:main"
+ xgae = "xgae.engine_cli_app:main"
  xgae-tools = "xgae.tools.without_general_tools_app:main"
  example-fault-tools = "examples.tools.custom_fault_tools_app:main"
+ example-a2a-tools = "examples.tools.simu_a2a_tools_app:main"
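The xgae console script now points at the renamed engine_cli_app module, and the new example-a2a-tools script starts the simulated A2A tool server. Per the comment in run_custom_and_agent_tools.py further down, both tool servers are expected to be running before the combined example is executed:

uv run example-fault-tools --alarmtype=2
uv run example-a2a-tools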
@@ -0,0 +1,59 @@
+ import json
+ import logging
+ import re
+ from typing import Any, Dict, List
+
+ from xgae.utils.misc import read_file
+ from xgae.utils.llm_client import LLMClient, LangfuseMetadata
+
+ class FinalResultAgent:
+     def __init__(self):
+         self.model_client = LLMClient()
+         self.prompt_template: str = read_file("templates/example/final_result_template.txt")
+
+
+     async def final_result(self, user_request: str, task_results: str, langfuse_metadata:LangfuseMetadata=None)-> Dict[str, Any]:
+         prompt = self.prompt_template.replace("{user_request}", user_request)
+         prompt = prompt.replace("{task_results}", task_results)
+
+         messages = [{"role": "user", "content": prompt}]
+
+         response_text: str = ""
+         response = await self.model_client.create_completion(
+             messages,
+             langfuse_metadata
+         )
+         if self.model_client.is_stream:
+             async for chunk in response:
+                 choices = chunk.get("choices", [{}])
+                 if not choices:
+                     continue
+                 delta = choices[0].get("delta", {})
+                 content = delta.get("content", "")
+                 if content:
+                     response_text += content
+         else:
+             response_text = response.choices[0].message.content
+
+         cleaned_text = re.sub(r'^\s*```json|```\s*$', '', response_text, flags=re.MULTILINE).strip()
+         final_result = json.loads(cleaned_text)
+         return final_result
+
+
+ if __name__ == "__main__":
+     import asyncio
+     from xgae.utils.setup_env import setup_logging
+     setup_logging()
+
+     async def main():
+         final_result_agent = FinalResultAgent()
+
+         user_input = "locate 10.2.3.4 fault and solution"
+         answer = ("Task Summary: The fault for IP 10.2.3.4 was identified as a Business Recharge Fault (Code: F01), "
+                   "caused by a Phone Recharge Application Crash. The solution applied was to restart the application. "
+                   "Key Deliverables: Fault diagnosis and resolution steps. Impact Achieved: Service restored.")
+         return await final_result_agent.final_result(user_input, answer)
+
+
+     final_result = asyncio.run(main())
+     print(f"FINAL_RESULT: {final_result} ")
@@ -0,0 +1,327 @@
+ import logging
+ import os
+
+ from typing import Any, Dict, List, Annotated, Sequence, TypedDict, Optional, AsyncGenerator
+ from uuid import uuid4
+
+ from langfuse.callback import CallbackHandler
+ from langfuse import Langfuse
+
+ from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+
+ from langgraph.graph import END, START, StateGraph
+ from langgraph.types import interrupt, Command
+ from langgraph.checkpoint.memory import MemorySaver
+ from langgraph.graph.message import add_messages
+ from langgraph.config import get_stream_writer
+
+ from xgae.utils.misc import read_file
+ from xgae.utils import log_trace
+
+ from xgae.engine.engine_base import XGATaskResult, XGAResponseMessage
+ from xgae.engine.mcp_tool_box import XGAMcpToolBox
+ from xgae.engine.task_engine import XGATaskEngine
+
+
+ class AgentContext(TypedDict, total=False):
+     task_id: str
+     session_id: str
+     user_id: str
+     agent_id: str
+     thread_id: str
+
+
+ class TaskState(TypedDict, total=False):
+     """State definition for the agent orchestration graph"""
+     messages: Annotated[Sequence[BaseMessage], add_messages]  # for message persistent
+     user_input: str
+     next_node: str
+     system_prompt: str
+     custom_tools: List[str]
+     general_tools: List[str]
+     task_result: XGATaskResult
+     final_result: XGATaskResult
+     iteration_count: int
+     agent_context: AgentContext
+
+
+ class XGAReactAgent:
+     MAX_TASK_RETRY = 2
+
+     def __init__(self):
+         self.tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+         self.graph = None
+         self.graph_config = None
+         self.graph_langfuse = Langfuse(enabled=False)
+
+         self.task_engine: XGATaskEngine = None
+
+     async def _create_graph(self) -> StateGraph:
+         try:
+             graph_builder = StateGraph(TaskState)
+
+             # Add nodes
+             graph_builder.add_node('supervisor', self._supervisor_node)
+             graph_builder.add_node('select_tool', self._select_tool_node)
+             graph_builder.add_node('exec_task', self._exec_task_node)
+             graph_builder.add_node('final_result', self._final_result_node)
+
+             # Add edges
+             graph_builder.add_edge(START, 'supervisor')
+             graph_builder.add_conditional_edges(
+                 'supervisor',
+                 self._next_condition,
+                 {
+                     'select_tool': 'select_tool',
+                     'exec_task': 'exec_task',
+                     'end': END
+                 }
+             )
+
+             graph_builder.add_edge('select_tool', 'exec_task')
+             graph_builder.add_edge('exec_task', 'final_result')
+
+             graph_builder.add_conditional_edges(
+                 'final_result',
+                 self._next_condition,
+                 {
+                     'supervisor': 'supervisor',
+                     'exec_task': 'exec_task',
+                     'end': END
+                 }
+             )
+
+             graph = graph_builder.compile(checkpointer=MemorySaver())
+             graph.name = "XGARectAgentGraph"
+
+             return graph
+         except Exception as e:
+             logging.error("Failed to create XGARectAgent Graph: %s", str(e))
+             raise
+
+     def _search_system_prompt(self, user_input: str) -> str:
+         # You should search RAG use user_input, fetch COT or Prompt for your business
+         system_prompt = None if "fault" not in user_input else read_file("templates/example/fault_user_prompt.txt")
+         return system_prompt
+
+     async def _supervisor_node(self, state: TaskState) -> Dict[str, Any]:
+         user_input = state.get("user_input", "")
+         system_prompt = self._search_system_prompt(user_input)
+
+         general_tools = [] if system_prompt else ["*"]
+         custom_tools = ["*"] if system_prompt else []
+
+         next_node = "select_tool" if system_prompt else "exec_task"
+
+         return {
+             'system_prompt' : system_prompt,
+             'next_node' : next_node,
+             'general_tools' : general_tools,
+             'custom_tools' : custom_tools,
+         }
+
+     def _select_custom_tools(self, system_prompt: str) -> list[str]:
+         custom_tools = ["*"] if system_prompt else []
+         return custom_tools
+
+     async def _select_tool_node(self, state: TaskState) -> Dict[str, Any]:
+         system_prompt = state.get('system_prompt',None)
+         general_tools = []
+         custom_tools = self._select_custom_tools(system_prompt)
+         return {
+             'general_tools' : general_tools,
+             'custom_tools' : custom_tools,
+         }
+
+     async def _exec_task_node(self, state: TaskState) -> Dict[str, Any]:
+         user_input = state['user_input']
+         system_prompt = state.get('system_prompt',None)
+         general_tools = state.get('general_tools',[])
+         custom_tools = state.get('custom_tools',[])
+         is_system_prompt = True if system_prompt is not None else False
+
+         trace_id = self.graph_langfuse.get_trace_id()
+         try:
+             logging.info(f"🔥 XGATaskEngine run_task: user_input={user_input}, general_tools={general_tools}, "
+                          f"custom_tools={custom_tools}, is_system_prompt={is_system_prompt}")
+             if self.task_engine is None:
+                 self.task_engine = XGATaskEngine(
+                     task_id = state['agent_context']['task_id'],
+                     session_id=state['agent_context'].get('session_id', None),
+                     user_id = state['agent_context'].get('user_id', None),
+                     agent_id = state['agent_context'].get('agent_id', None),
+                     tool_box = self.tool_box,
+                     general_tools = general_tools,
+                     custom_tools = custom_tools,
+                     system_prompt = system_prompt
+                 )
+
+             chunks = []
+             stream_writer = get_stream_writer()
+             async for chunk in self.task_engine.run_task(task_input={"role": "user", "content": user_input},
+                                                          trace_id=trace_id):
+                 chunks.append(chunk)
+                 stream_writer({"engine_message": chunk})
+
+             task_result = self.task_engine.parse_final_result(chunks)
+         except Exception as e:
+             logging.error(f"XReactAgent exec_task_node: Failed to execute task: {e}")
+             task_result = XGATaskResult(type="error", content="Failed to execute task")
+
+         iteration_count = state.get('iteration_count', 0) + 1
+         return {
+             'task_result' : task_result,
+             'iteration_count': iteration_count,
+         }
+
+     async def _final_result_node(self, state: TaskState) -> Dict[str, Any]:
+         user_input = state['user_input']
+         iteration_count = state['iteration_count']
+         task_result = state['task_result']
+
+         next_node = "end"
+         if task_result['type'] == "error" and iteration_count < self.MAX_TASK_RETRY:
+             next_node = "supervisor"
+             final_result = None
+         elif task_result['type'] == "ask":
+             final_result = task_result
+             logging.info(f"XReactAgent final_result_node: ASK_USER_QUESTION: {task_result['content']}")
+             user_input = interrupt({
+                 'final_result' : final_result
+             })
+             logging.info(f"XReactAgent final_result_node: ASK_USER_ANSWER: {user_input}")
+             next_node = "exec_task"
+         else:
+             final_result = task_result
+
+         return {
+             'user_input' : user_input,
+             'next_node' : next_node,
+             'final_result' : final_result,
+         }
+
+
+     def _next_condition(self, state: TaskState) -> str:
+         next_node = state['next_node']
+         return next_node
+
+
+     async def generate_with_result(self, user_input: str,
+                                    agent_context: Optional[AgentContext] = None,
+                                    is_resume: Optional[bool]=False) -> XGATaskResult:
+         agent_context = agent_context or {}
+         try:
+
+             if is_resume:
+                 logging.info(f"=== Start React Agent for USER_ASK_ANSWER: {user_input}")
+                 graph_input = Command(resume=user_input)
+             else:
+                 logging.info(f"=== Start React Agent for USER_INPUT: {user_input}")
+                 graph_input = await self._prepare_graph_start(user_input, agent_context)
+
+             final_state = await self.graph.ainvoke(graph_input, config=self.graph_config)
+
+             if "__interrupt__" in final_state:
+                 interrupt_event = final_state["__interrupt__"][0]
+                 interrupt_value = interrupt_event.value
+                 result = interrupt_value['final_result']
+             else:
+                 result = final_state['final_result']
+
+             return result
+         except Exception as e:
+             log_trace(e, f"XReactAgent generate: user_input={user_input}")
+             result = XGATaskResult(type="error", content=f"React Agent error: {e}")
+             return result
+
+
+     async def generate(self, user_input: str,
+                        agent_context: Optional[AgentContext]=None,
+                        is_resume: Optional[bool]=False) -> AsyncGenerator[Dict[str, Any], None]:
+         agent_context = agent_context or {}
+         try:
+             if is_resume:
+                 logging.info(f"=== Start React Stream Agent for USER_ASK_ANSWER: {user_input}")
+                 graph_input = Command(resume=user_input)
+             else:
+                 logging.info(f"=== Start React Stream Agent USER_ASK_ANSWER: {user_input}")
+                 graph_input = await self._prepare_graph_start(user_input, agent_context)
+
+             async for msg_type, message in self.graph.astream(input=graph_input,
+                                                               config=self.graph_config,
+                                                               stream_mode=["custom", "updates"]):
+                 if msg_type == "updates" and '__interrupt__' in message:
+                     interrupt_event = message["__interrupt__"][0]
+                     interrupt_value = interrupt_event.value
+                     final_result = interrupt_value['final_result']
+                     yield final_result
+                 elif msg_type == "updates" and 'final_result' in message:
+                     message = message['final_result']
+                     final_result = message.get('final_result', None)
+                     if final_result:
+                         yield final_result
+                 elif msg_type == "custom" and 'engine_message' in message:
+                     message = {'type': "message", 'content': message['engine_message']}
+                     yield message
+
+         except Exception as e:
+             log_trace(e, f"XReactAgent generate: user_input={user_input}")
+             yield XGATaskResult(type="error", content=f"React Agent error: {e}")
+
+
+     async def _prepare_graph_start(self, user_input, agent_context: AgentContext):
+         if self.graph is None:
+             self.graph = await self._create_graph()
+
+         agent_context = agent_context or {}
+         task_id = agent_context.get("task_id", f"xga_task_{uuid4()}")
+         agent_context["task_id"] = task_id
+         thread_id = agent_context.get('thread_id', task_id)
+         agent_context['thread_id'] = thread_id
+         session_id = agent_context.get('session_id', task_id)
+         agent_context['session_id'] = session_id
+
+         graph_input = {
+             'messages' : [HumanMessage(content=f"information for: {user_input}")],
+             'user_input' : user_input,
+             'next_node' : None,
+             'agent_context' : agent_context,
+             'iteration_count' : 0
+         }
+
+         langfuse_handler = self._get_langfuse_handler(agent_context)
+         callbacks = None
+         if langfuse_handler:
+             callbacks = [langfuse_handler]
+             self.graph_langfuse = langfuse_handler.langfuse
+
+
+         self.graph_config = {
+             'recursion_limit': 100,
+             'configurable': {
+                 'thread_id': thread_id
+             },
+             'callbacks': callbacks
+         }
+
+         return graph_input
+
+
+     def _get_langfuse_handler(self, agent_context: AgentContext)->CallbackHandler:
+         langfuse_handler = None
+         public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
+         secret_key = os.getenv("LANGFUSE_SECRET_KEY")
+         host = os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
+
+         if public_key and secret_key:
+             langfuse_handler = CallbackHandler(
+                 public_key = public_key,
+                 secret_key = secret_key,
+                 host = host,
+                 trace_name = "xga_react_agent",
+                 session_id = agent_context.get('session_id', None),
+                 user_id = agent_context.get('user_id', None),
+             )
+         return langfuse_handler
+
+
@@ -0,0 +1,62 @@
+ import asyncio
+
+ from xgae.utils.setup_env import setup_logging
+
+ from examples.agent.langgraph.react.react_agent import XGAReactAgent, AgentContext
+
+
+ async def main():
+     is_stream = True  # two mode agent experience
+     task_no = 0
+     user_inputs = [
+         # "5+5",  # For no tool call
+         # "locate 10.2.3.4 fault and solution",  # For custom tool
+         "locate fault and solution",  # For human append input
+     ]
+
+     for user_input in user_inputs:
+         agent = XGAReactAgent()
+         task_no += 1
+         context: AgentContext = {
+             'task_id': f"agent_task_{task_no}",
+             'user_id': f"agent_user_{task_no}",
+             'agent_id': f"agent_{task_no}",
+         }
+
+         is_resume = False
+         auto_continue = True
+         while auto_continue:
+             if is_stream:
+                 print(f"*** START AGENT : RUN generate USER_INPUT={user_input}")
+                 async for chunk in agent.generate(user_input, context, is_resume):
+                     type = chunk['type']
+                     if type == "error" or type == "answer":
+                         await asyncio.sleep(1)
+                         print(f"FINAL_RESULT: {chunk}")
+                         auto_continue = False
+                     elif type == "ask":
+                         print(f"ASK_USER: {chunk}")
+                         user_input = "17.0.0.1"
+                         is_resume = True
+                         auto_continue = True
+                     else:
+                         print(f"RESULT_CHUNK: {chunk}")
+                         auto_continue = False
+             else:
+                 print(f"*** START AGENT : RUN generate_with_result USER_INPUT={user_input}")
+                 result = await agent.generate_with_result(user_input, context, is_resume)
+                 await asyncio.sleep(1)
+                 type = result['type']
+                 if type == "error" or type == "answer":
+                     print(f"FINAL_RESULT: {result}")
+                     auto_continue = False
+                 elif type == "ask":
+                     print(f"ASK_USER: {result}")
+                     user_input = "18.0.0.1"
+                     is_resume = True
+                     auto_continue = True
+
+
+ if __name__ == "__main__":
+     setup_logging()
+     asyncio.run(main())
@@ -0,0 +1,45 @@
+ import asyncio
+
+ from xgae.engine.mcp_tool_box import XGAMcpToolBox
+ from xgae.engine.task_engine import XGATaskEngine
+ from xgae.utils.misc import read_file
+
+ from xgae.utils.setup_env import setup_logging
+
+
+ is_stream = False
+ if is_stream:
+     setup_logging(log_level="ERROR")  # only show chunk
+ else:
+     setup_logging()
+
+ # Before Run Exec: uv run example-fault-tools --alarmtype=2 , uv run example-a2a-tools
+ # If want to use real A2A agent tool, use xga-agent-tool project
+
+ async def main() -> None:
+     tool_box = XGAMcpToolBox(custom_mcp_server_file="mcpservers/custom_servers.json")
+     system_prompt = read_file("templates/example/fault_user_prompt.txt")
+
+     engine = XGATaskEngine(tool_box=tool_box,
+                            general_tools=[],
+                            custom_tools=["*"],
+                            system_prompt=system_prompt)
+
+
+     user_input = "locate 10.2.3.4 fault and solution"
+     global is_stream
+     if is_stream:
+         chunks = []
+         async for chunk in engine.run_task(task_input={"role": "user", "content": user_input}):
+             chunks.append(chunk)
+             print(chunk)
+
+         final_result = engine.parse_final_result(chunks)
+         print(f"\n\nFINAL_RESULT: {final_result}")
+     else:
+         final_result = await engine.run_task_with_final_answer(task_input={"role": "user", "content": user_input})
+         print(f"\n\nFINAL_RESULT: {final_result}")
+
+
+
+ asyncio.run(main())
@@ -10,7 +10,7 @@ async def main() -> None:
      engine = XGATaskEngine(general_tools=["*"])

      user_input = "This week's gold price"
-     final_result = await engine.run_task_with_final_answer(task_message={"role": "user", "content": user_input})
+     final_result = await engine.run_task_with_final_answer(task_input={"role": "user", "content": user_input})
      print("FINAL RESULT:", final_result)

  asyncio.run(main())
@@ -25,7 +25,7 @@ async def main() -> None:

      user_input = "locate fault and solution"
      final_result = await engine.run_task_with_final_answer(
-         task_message={"role": "user", "content": user_input},
+         task_input={"role": "user", "content": user_input},
          trace_id=trace_id
      )
      print("FINAL RESULT:", final_result)
@@ -34,7 +34,7 @@ async def main() -> None:
      print("====== Wait for user input ... ======")
      user_input = "ip=10.0.1.1"
      final_result = await engine.run_task_with_final_answer(
-         task_message={"role": "user", "content": user_input},
+         task_input={"role": "user", "content": user_input},
          trace_id=trace_id
      )
      print("FINAL RESULT:", final_result)
@@ -10,7 +10,7 @@ async def main() -> None:
      engine = XGATaskEngine()

      final_result = await engine.run_task_with_final_answer(
-         task_message={"role": "user", "content": "1+7"}
+         task_input={"role": "user", "content": "1+7"}
      )

      print("FINAL RESULT:", final_result)
@@ -1,8 +1,7 @@
  import click
  import logging

- from typing import Annotated
- from typing import Dict, Any
+ from typing import Annotated, Dict, Any
  from pydantic import Field

  from mcp.server.fastmcp import FastMCP
@@ -65,19 +64,19 @@ async def get_busi_fault_cause(fault_code: Annotated[str, Field(description="Fau
      return fault_cause


- @mcp.tool(
-     description="Get Equipment Type Fault Solution and Cause",
- )
- async def get_equip_fault_cause(fault_code: Annotated[str, Field(description="Fault Code")]) -> str:
-     logging.info(f"get_equip_fault_cause: faultCode={fault_code}")
-
-     fault_cause = ""
-     if (fault_code == 'F01'):
-         fault_cause = "Host Fault, Fault Cause is 'Host Disk is Damaged' ,Solution is 'Change Host Disk'"
-     else:
-         fault_cause = f"FaultCode '{fault_code}' is not Equipment Type"
-
-     return fault_cause
+ # @mcp.tool(
+ #     description="Get Equipment Type Fault Solution and Cause",
+ # )
+ # async def query_equip_fault_cause(fault_code: Annotated[str, Field(description="Fault Code")]) -> str:
+ #     logging.info(f"get_equip_fault_cause: faultCode={fault_code}")
+ #
+ #     fault_cause = ""
+ #     if (fault_code == 'F02'):
+ #         fault_cause = "Host Fault, Fault Cause is 'Host Disk is Damaged' ,Solution is 'Change Host Disk'"
+ #     else:
+ #         fault_cause = f"FaultCode '{fault_code}' is not Equipment Type"
+ #
+ #     return fault_cause


  @click.command()